| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
ODPP | ODPP-main/ODPP_Relation/option_agent/VIC.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, policy, value, decoder
from utils.summary_tools import write_summary, write_hist
from option_agent.base_option_agent import Option_Agent
cl... | 14,687 | 52.802198 | 172 | py |
ODPP | ODPP-main/ODPP_Relation/option_agent/DCO.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from option_agent.hierarchical_po... | 12,888 | 50.146825 | 180 | py |
ODPP | ODPP-main/ODPP_Relation/option_agent/base_option_agent.py | import os
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from utils.summary_tools import write_summary
from option_agent.hierarchical_policy import get_final_state_value, get_return_array, get_advantage
class Option_Agent(object):
def __init__(self, args):
... | 10,002 | 50.035714 | 156 | py |
ODPP | ODPP-main/ODPP_Relation/option_agent/ODPP.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder, prior
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from spectral... | 20,810 | 52.775194 | 180 | py |
ODPP | ODPP-main/ODPP_Relation/option_agent/DIAYN.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class DIAYN_Agent(Op... | 10,533 | 53.864583 | 183 | py |
ODPP | ODPP-main/ODPP_Relation/utils/buffer.py | import torch as th
import numpy as np
from types import SimpleNamespace as SN
class OneHot(object):
def __init__(self, out_dim):
self.out_dim = out_dim
def transform(self, tensor): # check
y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
y_onehot.scatter_(-1, tensor.lon... | 7,036 | 40.394118 | 159 | py |
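The `utils/buffer.py` preview above is cut off mid-call at `tensor.lon...`. A minimal self-contained sketch of the scatter-based one-hot transform it appears to implement; completing the call as `tensor.long(), 1)` and returning `.float()` are assumptions:

```python
import torch

class OneHot:
    """One-hot encode an index tensor along its last dimension."""
    def __init__(self, out_dim):
        self.out_dim = out_dim

    def transform(self, tensor):
        # Allocate zeros with the index dimension replaced by out_dim,
        # then scatter 1s at the positions given by the (long) indices.
        y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
        y_onehot.scatter_(-1, tensor.long(), 1)  # assumed completion
        return y_onehot.float()                  # assumed return dtype

# usage: encode a batch of option indices
idx = torch.tensor([[2], [0], [1]])
print(OneHot(out_dim=4).transform(idx))
# tensor([[0., 0., 1., 0.], [1., 0., 0., 0.], [0., 1., 0., 0.]])
```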
ODPP | ODPP-main/ODPP_Relation/utils/torch_tools.py | import numpy as np
import torch
def to_tensor(x, device):
"""return a torch.Tensor, assume x is an np array."""
if x.dtype in [np.float32, np.float64]:
return torch.tensor(x, dtype=torch.float32, device=device)
elif x.dtype in [np.int32, np.int64, np.uint8]:
return torch.tensor(x, dtype=tor... | 421 | 37.363636 | 66 | py |
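The `utils/torch_tools.py` preview truncates the integer branch at `dtype=tor...`. A sketch of the likely completion, assuming integer arrays are promoted to `torch.int64` (the unseen fallback branch below is also an assumption):

```python
import numpy as np
import torch

def to_tensor(x, device):
    """Return a torch.Tensor; assumes x is a numpy array."""
    if x.dtype in [np.float32, np.float64]:
        return torch.tensor(x, dtype=torch.float32, device=device)
    elif x.dtype in [np.int32, np.int64, np.uint8]:
        # assumed completion: all integer dtypes map to int64
        return torch.tensor(x, dtype=torch.int64, device=device)
    raise ValueError('unsupported dtype: {}'.format(x.dtype))  # assumed fallback

# usage
print(to_tensor(np.arange(3, dtype=np.int32), device='cpu').dtype)  # torch.int64
```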
ODPP | ODPP-main/ODPP_Relation/utils/summary_tools.py | import numpy as np
def get_summary_str(step=None, info=None, prefix=''):
summary_str = prefix
if step is not None:
summary_str += 'Step {}; '.format(step)
for key, val in info.items():
if isinstance(val, (int, np.int32, np.int64)):
summary_str += '{} {}; '.format(key, val)
... | 834 | 35.304348 | 85 | py |
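The `utils/summary_tools.py` preview ends inside the loop over `info.items()`. A sketch of how such a logging-string builder typically completes; the float branch and return are assumptions:

```python
import numpy as np

def get_summary_str(step=None, info=None, prefix=''):
    """Format a log line such as 'Step 100; loss 0.2500; episodes 4; '."""
    summary_str = prefix
    if step is not None:
        summary_str += 'Step {}; '.format(step)
    for key, val in info.items():
        if isinstance(val, (int, np.int32, np.int64)):
            summary_str += '{} {}; '.format(key, val)
        else:
            # assumed completion: floats printed with fixed precision
            summary_str += '{} {:.4f}; '.format(key, val)
    return summary_str

print(get_summary_str(step=10, info={'loss': 0.12345, 'episodes': 4}))
```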
ODPP | ODPP-main/ODPP_Relation/learner/value.py | import torch.nn as nn
from learner.base_mlp import MLP
# TODO: share layer with the policy netwrok
class ValueFuntion(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(ValueFuntion, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, 1])
def forward(self, x):... | 385 | 28.692308 | 69 | py |
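The `learner/value.py` preview cuts off at `forward`. A self-contained sketch of the state-value head it defines (note the repo spells the class `ValueFuntion`); the repo builds the body with its own `MLP` helper, replaced here by `nn.Sequential` so the sketch runs standalone, and the trailing squeeze is an assumption:

```python
import torch
import torch.nn as nn

class ValueFunction(nn.Module):
    """State-value head: an MLP mapping a state to a scalar V(s)."""
    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(input_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, x):
        # assumed completion: drop the trailing value dimension
        return self.mlp(x).squeeze(-1)

v = ValueFunction(input_dim=8, hidden_dim=64)
print(v(torch.randn(5, 8)).shape)  # torch.Size([5])
```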
ODPP | ODPP-main/ODPP_Relation/learner/base_mlp.py | import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, layers, activation=torch.tanh, output_activation=None, init=True): # TODO:relu
super(MLP, self).__init__()
self.layers = nn.ModuleList()
self.activation = activation
self.output_activation = output_activatio... | 1,136 | 34.53125 | 101 | py |
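The `learner/base_mlp.py` preview shows an `MLP` built from a list of layer widths with a configurable activation, truncated at `output_activatio...`. A sketch of the standard pattern; the layer construction and forward loop are assumptions consistent with the visible signature:

```python
import torch
import torch.nn as nn

class MLP(nn.Module):
    """Plain MLP defined by a list of layer widths, e.g. [obs_dim, 64, 64, out_dim]."""
    def __init__(self, layers, activation=torch.tanh, output_activation=None):
        super().__init__()
        self.layers = nn.ModuleList(
            [nn.Linear(layers[i], layers[i + 1]) for i in range(len(layers) - 1)])
        self.activation = activation
        self.output_activation = output_activation

    def forward(self, x):
        # assumed completion: activation between hidden layers,
        # optional activation on the final output
        for layer in self.layers[:-1]:
            x = self.activation(layer(x))
        x = self.layers[-1](x)
        return self.output_activation(x) if self.output_activation else x

print(MLP([4, 32, 2])(torch.randn(3, 4)).shape)  # torch.Size([3, 2])
```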
ODPP | ODPP-main/ODPP_Relation/learner/rnn_decoder.py | import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
class RNN_Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(RNN_Decoder, self).__init__()
self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim, batch_first=True, bidir... | 1,253 | 40.8 | 111 | py |
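The `learner/rnn_decoder.py` preview truncates at `bidir...`, indicating a bidirectional LSTM. A sketch of a VALOR-style trajectory decoder consistent with that signature; the linear head and the choice of last-step features are assumptions:

```python
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical

class RNN_Decoder(nn.Module):
    """Trajectory decoder: a bidirectional LSTM reads a state sequence and
    predicts a distribution over the latent option code."""
    def __init__(self, input_dim, hidden_dim, code_dim):
        super().__init__()
        self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim,
                            batch_first=True, bidirectional=True)
        # assumed head: map 2*hidden_dim bidirectional features to code logits
        self.head = nn.Linear(2 * hidden_dim, code_dim)

    def forward(self, traj):  # traj: (batch, seq_len, input_dim)
        out, _ = self.lstm(traj)
        logits = self.head(out[:, -1])  # assumed: features at the last step
        return Categorical(logits=logits)

dist = RNN_Decoder(input_dim=4, hidden_dim=16, code_dim=8)(torch.randn(2, 10, 4))
print(dist.probs.shape)  # torch.Size([2, 8])
```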
ODPP | ODPP-main/ODPP_Relation/learner/prior.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Prior(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim, is_high=False):
super(Prior, self).__init__()
self.is_high = is_high
self.mlp = MLP(layers=[input_dim,... | 1,189 | 33 | 91 | py |
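The `learner/prior.py` preview shows an MLP-backed prior over the discrete code, truncated mid-constructor. A self-contained sketch of a prior p(c | s0) returning a `Categorical`; the repo's `MLP` helper is replaced by `nn.Sequential`, and the role of the unseen `is_high` flag is not reproduced here:

```python
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical

class Prior(nn.Module):
    """Option prior p(c | s0): maps an initial state to a Categorical
    over the discrete option code."""
    def __init__(self, input_dim, hidden_dim, code_dim):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(input_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, code_dim),
        )

    def forward(self, s0):
        return Categorical(logits=self.mlp(s0))

c = Prior(input_dim=4, hidden_dim=32, code_dim=8)(torch.randn(5, 4)).sample()
print(c.shape)  # torch.Size([5])
```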
ODPP | ODPP-main/ODPP_Relation/learner/policy.py | import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
import numpy as np
from learner.base_mlp import MLP
class GaussianPolicy(nn.Module):
def __init__(self, input_dim, hidden_dim, action_dim, output_activation=None, act_range=None... | 1,980 | 36.377358 | 153 | py |
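The `learner/policy.py` preview cuts off at the `GaussianPolicy` signature (`act_range=None...`). A sketch of a continuous-action Gaussian policy consistent with that signature; the learned state-independent log-std and the tanh-then-scale handling of `act_range` are assumptions:

```python
import torch
import torch.nn as nn
from torch.distributions.normal import Normal

class GaussianPolicy(nn.Module):
    """Continuous-action policy pi(a | s): an MLP outputs the Gaussian mean."""
    def __init__(self, input_dim, hidden_dim, action_dim, act_range=None):
        super().__init__()
        self.mu = nn.Sequential(
            nn.Linear(input_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, action_dim), nn.Tanh(),
        )
        # assumed: state-independent learned log-std
        self.log_std = nn.Parameter(-0.5 * torch.ones(action_dim))
        self.act_range = act_range

    def forward(self, x):
        mean = self.mu(x)
        if self.act_range is not None:
            mean = self.act_range * mean  # assumed: scale tanh output to bounds
        return Normal(mean, self.log_std.exp())

dist = GaussianPolicy(8, 64, 2, act_range=1.0)(torch.randn(3, 8))
a = dist.sample()
print(a.shape, dist.log_prob(a).sum(-1).shape)  # (3, 2) (3,)
```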
ODPP | ODPP-main/ODPP_Relation/learner/decoder.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(Decoder, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, code_dim])
de... | 861 | 29.785714 | 76 | py |
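The `learner/decoder.py` preview truncates after the constructor. A self-contained sketch of the per-state discriminator q(c | s) used by DIAYN/VIC-style objectives; the `nn.Sequential` body stands in for the repo's `MLP` helper, and the uniform-prior intrinsic-reward usage below is illustrative, not taken from the repo:

```python
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical

class Decoder(nn.Module):
    """Per-state discriminator q(c | s): an MLP maps a state to logits
    over the discrete option code."""
    def __init__(self, input_dim, hidden_dim, code_dim):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(input_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim), nn.Tanh(),
            nn.Linear(hidden_dim, code_dim),
        )

    def forward(self, s):
        return Categorical(logits=self.mlp(s))

# illustrative intrinsic reward: log q(c|s) - log p(c) with a uniform prior
dec = Decoder(input_dim=4, hidden_dim=32, code_dim=8)
s, c = torch.randn(5, 4), torch.randint(0, 8, (5,))
r = dec(s).log_prob(c) - torch.log(torch.tensor(1.0 / 8))
print(r.shape)  # torch.Size([5])
```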
ODPP | ODPP-main/ODPP_Relation/spectral_DPP_agent/laprepr.py | import os
import collections
import numpy as np
from tqdm import tqdm
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from configs import get_laprepr_args
from utils import torch_tools, timer_tools, summary_tools
from spectral_DPP_agent.spectral_buffer import EpisodicReplayBuffer... | 14,097 | 40.464706 | 136 | py |
ODPP | ODPP-main/ODPP_Atari/main.py | import os
import gym
import torch
import datetime
import random
import numpy as np
from configs import get_common_args
from runner import Runner
from hierarchical_runner import HierRunner
from spectral_DPP_agent.laprepr import LapReprLearner
from utils.env_wrapper import EnvWrapper
def main():
# prepare
args =... | 2,206 | 32.439394 | 134 | py |
ODPP | ODPP-main/ODPP_Atari/hierarchical_runner.py | import os
import torch
import numpy as np
from tqdm import tqdm
import torch.nn.functional as F
from configs import get_hierarchical_args
from option_agent.hierarchical_policy import HierPolicy
from utils.buffer import OneHot, EpisodeBatch, ReplayBuffer
from runner import Runner
class HierRunner(object):
def __ini... | 15,283 | 51.522337 | 163 | py |
ODPP | ODPP-main/ODPP_Atari/runner.py | import os
import torch
from tqdm import tqdm
import numpy as np
from configs import get_rl_args
from option_agent import REGISTRY as agent_REGISTRY
from utils.buffer import OneHot, EpisodeBatch, ReplayBuffer
from spectral_DPP_agent.laprepr import LapReprLearner
from visualization.draw_trajectory import draw_traj
class... | 13,093 | 47.858209 | 148 | py |
ODPP | ODPP-main/ODPP_Atari/option_agent/ODPP_bk.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder, prior
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from spectral... | 20,928 | 52.802057 | 180 | py |
ODPP | ODPP-main/ODPP_Atari/option_agent/hierarchical_policy.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, value, policy
from utils.summary_tools import write_summary
def get_final_state_value(args, target_v, horizons):
target_v_array = targ... | 14,848 | 46.9 | 197 | py |
ODPP | ODPP-main/ODPP_Atari/option_agent/VALOR.py | import os, math
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class VALOR... | 11,004 | 52.682927 | 167 | py |
ODPP | ODPP-main/ODPP_Atari/option_agent/VIC.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, policy, value, decoder
from utils.summary_tools import write_summary, write_hist
from option_agent.base_option_agent import Option_Agent
cl... | 14,805 | 52.84 | 172 | py |
ODPP | ODPP-main/ODPP_Atari/option_agent/DCO.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from option_agent.hierarchical_po... | 13,044 | 50.156863 | 180 | py |
ODPP | ODPP-main/ODPP_Atari/option_agent/base_option_agent.py | import os
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from utils.summary_tools import write_summary
from option_agent.hierarchical_policy import get_final_state_value, get_return_array, get_advantage
class Option_Agent(object):
def __init__(self, args):
... | 10,063 | 49.828283 | 156 | py |
ODPP | ODPP-main/ODPP_Atari/option_agent/ODPP.py | import os
import torch
import math
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder, prior
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
f... | 21,208 | 52.966921 | 180 | py |
ODPP | ODPP-main/ODPP_Atari/option_agent/DIAYN.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class DIAYN_Agent(Op... | 10,659 | 53.948454 | 183 | py |
ODPP | ODPP-main/ODPP_Atari/utils/buffer.py | import torch as th
import numpy as np
from types import SimpleNamespace as SN
class OneHot(object):
def __init__(self, out_dim):
self.out_dim = out_dim
def transform(self, tensor): # check
y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
y_onehot.scatter_(-1, tensor.lon... | 7,036 | 40.394118 | 159 | py |
ODPP | ODPP-main/ODPP_Atari/utils/torch_tools.py | import numpy as np
import torch
def to_tensor(x, device):
"""return a torch.Tensor, assume x is an np array."""
if x.dtype in [np.float32, np.float64]:
return torch.tensor(x, dtype=torch.float32, device=device)
elif x.dtype in [np.int32, np.int64, np.uint8]:
return torch.tensor(x, dtype=tor... | 421 | 37.363636 | 66 | py |
ODPP | ODPP-main/ODPP_Atari/utils/summary_tools.py | import numpy as np
def get_summary_str(step=None, info=None, prefix=''):
summary_str = prefix
if step is not None:
summary_str += 'Step {}; '.format(step)
for key, val in info.items():
if isinstance(val, (int, np.int32, np.int64)):
summary_str += '{} {}; '.format(key, val)
... | 834 | 35.304348 | 85 | py |
ODPP | ODPP-main/ODPP_Atari/learner/value.py | import torch.nn as nn
from learner.base_mlp import MLP
# TODO: share layer with the policy netwrok
class ValueFuntion(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(ValueFuntion, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, 1])
def forward(self, x):... | 385 | 28.692308 | 69 | py |
ODPP | ODPP-main/ODPP_Atari/learner/base_mlp.py | import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, layers, activation=torch.tanh, output_activation=None, init=True): # TODO:relu
super(MLP, self).__init__()
self.layers = nn.ModuleList()
self.activation = activation
self.output_activation = output_activatio... | 1,136 | 34.53125 | 101 | py |
ODPP | ODPP-main/ODPP_Atari/learner/rnn_decoder.py | import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
class RNN_Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(RNN_Decoder, self).__init__()
self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim, batch_first=True, bidir... | 1,253 | 40.8 | 111 | py |
ODPP | ODPP-main/ODPP_Atari/learner/prior.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Prior(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim, is_high=False):
super(Prior, self).__init__()
self.is_high = is_high
self.mlp = MLP(layers=[input_dim,... | 1,189 | 33 | 91 | py |
ODPP | ODPP-main/ODPP_Atari/learner/policy.py | import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
import numpy as np
from learner.base_mlp import MLP
class GaussianPolicy(nn.Module):
def __init__(self, input_dim, hidden_dim, action_dim, output_activation=None, act_range=None... | 1,980 | 36.377358 | 153 | py |
ODPP | ODPP-main/ODPP_Atari/learner/decoder.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(Decoder, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, code_dim])
de... | 861 | 29.785714 | 76 | py |
ODPP | ODPP-main/ODPP_Atari/spectral_DPP_agent/laprepr.py | import os
import collections
import numpy as np
from tqdm import tqdm
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from configs import get_laprepr_args
from utils import torch_tools, timer_tools, summary_tools
from spectral_DPP_agent.spectral_buffer import EpisodicReplayBuffer... | 13,436 | 39.966463 | 132 | py |
ODPP | ODPP-main/ODPP_Atari/spectral_DPP_agent/laprepr_bk.py | import os
import collections
import numpy as np
from tqdm import tqdm
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from configs import get_laprepr_args
from utils import torch_tools, timer_tools, summary_tools
from spectral_DPP_agent.spectral_buffer import EpisodicReplayBuffer... | 13,324 | 40 | 132 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/main.py | import os
import gym
import torch
import datetime
import random
import numpy as np
from configs import get_common_args
from runner import Runner
from hierarchical_runner import HierRunner
from spectral_DPP_agent.laprepr import LapReprLearner
from utils.env_wrapper import EnvWrapper
import robo_env
def main():
# pr... | 2,149 | 31.575758 | 102 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/hierarchical_runner.py | import os
import torch
import numpy as np
from tqdm import tqdm
import torch.nn.functional as F
from configs import get_hierarchical_args
from option_agent.hierarchical_policy import HierPolicy
from utils.buffer import OneHot, EpisodeBatch, ReplayBuffer
from runner import Runner
class HierRunner(object):
def __ini... | 16,583 | 51.481013 | 165 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/runner.py | import os
import torch
from tqdm import tqdm
import numpy as np
from configs import get_rl_args
from option_agent import REGISTRY as agent_REGISTRY
from utils.buffer import OneHot, EpisodeBatch, ReplayBuffer
from spectral_DPP_agent.laprepr import LapReprLearner
from visualization.draw_trajectory import draw_traj
class... | 9,513 | 48.041237 | 148 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/option_agent/hierarchical_policy.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, value, policy
from utils.summary_tools import write_summary
def get_final_state_value(args, target_v, horizons):
target_v_array = targ... | 15,433 | 47.080997 | 197 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/option_agent/VALOR.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class VALOR_Agent... | 10,865 | 52.527094 | 167 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/option_agent/VIC.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, policy, value, decoder
from utils.summary_tools import write_summary, write_hist
from option_agent.base_option_agent import Option_Agent
cl... | 14,632 | 52.99631 | 172 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/option_agent/DCO.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from option_agent.hierarchical_po... | 12,935 | 50.130435 | 180 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/option_agent/base_option_agent.py | import os
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from utils.summary_tools import write_summary
from option_agent.hierarchical_policy import get_final_state_value, get_return_array, get_advantage
class Option_Agent(object):
def __init__(self, args):
... | 10,002 | 50.035714 | 156 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/option_agent/ODPP.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder, prior
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from spectral... | 21,034 | 53.07455 | 180 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/option_agent/DIAYN.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class DIAYN_Agent(Op... | 10,567 | 53.756477 | 183 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/utils/buffer.py | import torch as th
import numpy as np
from types import SimpleNamespace as SN
class OneHot(object):
def __init__(self, out_dim):
self.out_dim = out_dim
def transform(self, tensor): # check
y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
y_onehot.scatter_(-1, tensor.lon... | 7,036 | 40.394118 | 159 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/utils/torch_tools.py | import numpy as np
import torch
def to_tensor(x, device):
"""return a torch.Tensor, assume x is an np array."""
if x.dtype in [np.float32, np.float64]:
return torch.tensor(x, dtype=torch.float32, device=device)
elif x.dtype in [np.int32, np.int64, np.uint8]:
return torch.tensor(x, dtype=tor... | 421 | 37.363636 | 66 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/utils/summary_tools.py | import numpy as np
def get_summary_str(step=None, info=None, prefix=''):
summary_str = prefix
if step is not None:
summary_str += 'Step {}; '.format(step)
for key, val in info.items():
if isinstance(val, (int, np.int32, np.int64)):
summary_str += '{} {}; '.format(key, val)
... | 834 | 35.304348 | 85 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/learner/value.py | import torch.nn as nn
from learner.base_mlp import MLP
# TODO: share layer with the policy netwrok
class ValueFuntion(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(ValueFuntion, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, 1])
def forward(self, x):... | 385 | 28.692308 | 69 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/learner/base_mlp.py | import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, layers, activation=torch.tanh, output_activation=None, init=True): # TODO:relu
super(MLP, self).__init__()
self.layers = nn.ModuleList()
self.activation = activation
self.output_activation = output_activatio... | 1,136 | 34.53125 | 101 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/learner/rnn_decoder.py | import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
class RNN_Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(RNN_Decoder, self).__init__()
self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim, batch_first=True, bidir... | 1,253 | 40.8 | 111 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/learner/prior.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Prior(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim, is_high=False):
super(Prior, self).__init__()
self.is_high = is_high
self.mlp = MLP(layers=[input_dim,... | 1,189 | 33 | 91 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/learner/policy.py | import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
import numpy as np
from learner.base_mlp import MLP
class GaussianPolicy(nn.Module):
def __init__(self, input_dim, hidden_dim, action_dim, output_activation=None, act_range=None... | 1,981 | 35.703704 | 153 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/learner/decoder.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(Decoder, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, code_dim])
de... | 861 | 29.785714 | 76 | py |
ODPP | ODPP-main/ODPP_Downstream_Ant_Room/spectral_DPP_agent/laprepr.py | import os
import collections
import numpy as np
from tqdm import tqdm
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from configs import get_laprepr_args
from utils import torch_tools, timer_tools, summary_tools
from spectral_DPP_agent.spectral_buffer import EpisodicReplayBuffer... | 13,915 | 40.050147 | 127 | py |
ODPP | ODPP-main/ODPP_Locomotion/main.py | import os
import gym
import torch
import datetime
import random
import numpy as np
from configs import get_common_args
from runner import Runner
from hierarchical_runner import HierRunner
from spectral_DPP_agent.laprepr import LapReprLearner
from utils.env_wrapper import EnvWrapper
import robo_env
def main():
# pr... | 2,151 | 31.606061 | 102 | py |
ODPP | ODPP-main/ODPP_Locomotion/hierarchical_runner.py | import os
import torch
import numpy as np
from tqdm import tqdm
import torch.nn.functional as F
from configs import get_hierarchical_args
from option_agent.hierarchical_policy import HierPolicy
from utils.buffer import OneHot, EpisodeBatch, ReplayBuffer
from runner import Runner
class HierRunner(object):
def __ini... | 15,283 | 51.522337 | 163 | py |
ODPP | ODPP-main/ODPP_Locomotion/runner.py | import os
import torch
from tqdm import tqdm
import numpy as np
from configs import get_rl_args
from option_agent import REGISTRY as agent_REGISTRY
from utils.buffer import OneHot, EpisodeBatch, ReplayBuffer
from spectral_DPP_agent.laprepr import LapReprLearner
from visualization.draw_trajectory import draw_traj
class... | 13,091 | 47.850746 | 148 | py |
ODPP | ODPP-main/ODPP_Locomotion/option_agent/ODPP_bk.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder, prior
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from spectral... | 20,928 | 52.802057 | 180 | py |
ODPP | ODPP-main/ODPP_Locomotion/option_agent/hierarchical_policy.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, value, policy
from utils.summary_tools import write_summary
def get_final_state_value(args, target_v, horizons):
target_v_array = targ... | 14,848 | 46.9 | 197 | py |
ODPP | ODPP-main/ODPP_Locomotion/option_agent/VALOR.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class VALOR_Agent... | 10,991 | 52.619512 | 167 | py |
ODPP | ODPP-main/ODPP_Locomotion/option_agent/VIC.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, policy, value, decoder
from utils.summary_tools import write_summary, write_hist
from option_agent.base_option_agent import Option_Agent
cl... | 14,805 | 52.84 | 172 | py |
ODPP | ODPP-main/ODPP_Locomotion/option_agent/DCO.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from option_agent.hierarchical_po... | 13,006 | 50.208661 | 180 | py |
ODPP | ODPP-main/ODPP_Locomotion/option_agent/base_option_agent.py | import os
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from utils.summary_tools import write_summary
from option_agent.hierarchical_policy import get_final_state_value, get_return_array, get_advantage
class Option_Agent(object):
def __init__(self, args):
... | 10,002 | 50.035714 | 156 | py |
ODPP | ODPP-main/ODPP_Locomotion/option_agent/ODPP.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder, prior
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from spectral... | 21,133 | 53.051151 | 180 | py |
ODPP | ODPP-main/ODPP_Locomotion/option_agent/DIAYN.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class DIAYN_Agent(Op... | 10,659 | 53.948454 | 183 | py |
ODPP | ODPP-main/ODPP_Locomotion/utils/buffer.py | import torch as th
import numpy as np
from types import SimpleNamespace as SN
class OneHot(object):
def __init__(self, out_dim):
self.out_dim = out_dim
def transform(self, tensor): # check
y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
y_onehot.scatter_(-1, tensor.lon... | 7,036 | 40.394118 | 159 | py |
ODPP | ODPP-main/ODPP_Locomotion/utils/torch_tools.py | import numpy as np
import torch
def to_tensor(x, device):
"""return a torch.Tensor, assume x is an np array."""
if x.dtype in [np.float32, np.float64]:
return torch.tensor(x, dtype=torch.float32, device=device)
elif x.dtype in [np.int32, np.int64, np.uint8]:
return torch.tensor(x, dtype=tor... | 421 | 37.363636 | 66 | py |
ODPP | ODPP-main/ODPP_Locomotion/utils/summary_tools.py | import numpy as np
def get_summary_str(step=None, info=None, prefix=''):
summary_str = prefix
if step is not None:
summary_str += 'Step {}; '.format(step)
for key, val in info.items():
if isinstance(val, (int, np.int32, np.int64)):
summary_str += '{} {}; '.format(key, val)
... | 834 | 35.304348 | 85 | py |
ODPP | ODPP-main/ODPP_Locomotion/learner/value.py | import torch.nn as nn
from learner.base_mlp import MLP
# TODO: share layer with the policy netwrok
class ValueFuntion(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(ValueFuntion, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, 1])
def forward(self, x):... | 385 | 28.692308 | 69 | py |
ODPP | ODPP-main/ODPP_Locomotion/learner/base_mlp.py | import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, layers, activation=torch.tanh, output_activation=None, init=True): # TODO:relu
super(MLP, self).__init__()
self.layers = nn.ModuleList()
self.activation = activation
self.output_activation = output_activatio... | 1,136 | 34.53125 | 101 | py |
ODPP | ODPP-main/ODPP_Locomotion/learner/rnn_decoder.py | import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
class RNN_Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(RNN_Decoder, self).__init__()
self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim, batch_first=True, bidir... | 1,253 | 40.8 | 111 | py |
ODPP | ODPP-main/ODPP_Locomotion/learner/prior.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Prior(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim, is_high=False):
super(Prior, self).__init__()
self.is_high = is_high
self.mlp = MLP(layers=[input_dim,... | 1,189 | 33 | 91 | py |
ODPP | ODPP-main/ODPP_Locomotion/learner/policy.py | import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
import numpy as np
from learner.base_mlp import MLP
class GaussianPolicy(nn.Module):
def __init__(self, input_dim, hidden_dim, action_dim, output_activation=None, act_range=None... | 1,980 | 36.377358 | 153 | py |
ODPP | ODPP-main/ODPP_Locomotion/learner/decoder.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(Decoder, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, code_dim])
de... | 861 | 29.785714 | 76 | py |
ODPP | ODPP-main/ODPP_Locomotion/spectral_DPP_agent/laprepr.py | import os
import collections
import numpy as np
from tqdm import tqdm
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from configs import get_laprepr_args
from utils import torch_tools, timer_tools, summary_tools
from spectral_DPP_agent.spectral_buffer import EpisodicReplayBuffer... | 13,324 | 40 | 132 | py |
ODPP | ODPP-main/ODPP_Locomotion/spectral_DPP_agent/laprepr_bk.py | import os
import collections
import numpy as np
from tqdm import tqdm
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from configs import get_laprepr_args
from utils import torch_tools, timer_tools, summary_tools
from spectral_DPP_agent.spectral_buffer import EpisodicReplayBuffer... | 13,324 | 40 | 132 | py |
ODPP | ODPP-main/ODPP_Prior/main.py | import os
import gym
import torch
import datetime
import random
import numpy as np
from configs import get_common_args
from runner import Runner
from hierarchical_runner import HierRunner
from spectral_DPP_agent.laprepr import LapReprLearner
from visualization.draw_option_dist import draw_option_ori_dist, draw_option_c... | 2,335 | 32.855072 | 102 | py |
ODPP | ODPP-main/ODPP_Prior/hierarchical_runner.py | import os
import torch
import numpy as np
from tqdm import tqdm
import torch.nn.functional as F
from configs import get_hierarchical_args
from option_agent.hierarchical_policy import HierPolicy
from utils.buffer import OneHot, EpisodeBatch, ReplayBuffer
from runner import Runner
class HierRunner(object):
def __ini... | 15,987 | 51.592105 | 165 | py |
ODPP | ODPP-main/ODPP_Prior/runner.py | import os
import torch
from tqdm import tqdm
import numpy as np
from configs import get_rl_args
from option_agent import REGISTRY as agent_REGISTRY
from utils.buffer import OneHot, EpisodeBatch, ReplayBuffer
from spectral_DPP_agent.laprepr import LapReprLearner
from visualization.draw_trajectory import draw_traj
class... | 11,810 | 48.62605 | 148 | py |
ODPP | ODPP-main/ODPP_Prior/visualization/draw_option_dist.py | import torch
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from runner import Runner
def get_option_orientation(args, env, runner: Runner):
env.set_sample_inits(False)
ori_list = {}
traj_num = 100
traj_len = 50
for c_id in range(args.code_dim):
c = torch.tensor([... | 3,832 | 32.920354 | 99 | py |
ODPP | ODPP-main/ODPP_Prior/option_agent/hierarchical_policy.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, value, policy
from utils.summary_tools import write_summary
def get_final_state_value(args, target_v, horizons):
target_v_array = targ... | 15,360 | 47.153605 | 197 | py |
ODPP | ODPP-main/ODPP_Prior/option_agent/VALOR.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class VALOR_Agent... | 10,865 | 52.527094 | 167 | py |
ODPP | ODPP-main/ODPP_Prior/option_agent/VIC.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import prior, policy, value, decoder
from utils.summary_tools import write_summary, write_hist
from option_agent.base_option_agent import Option_Agent
cl... | 14,785 | 53.161172 | 172 | py |
ODPP | ODPP-main/ODPP_Prior/option_agent/DCO.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from option_agent.hierarchical_po... | 12,935 | 50.130435 | 180 | py |
ODPP | ODPP-main/ODPP_Prior/option_agent/base_option_agent.py | import os
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from utils.summary_tools import write_summary
from option_agent.hierarchical_policy import get_final_state_value, get_return_array, get_advantage
class Option_Agent(object):
def __init__(self, args):
... | 10,035 | 50.204082 | 156 | py |
ODPP | ODPP-main/ODPP_Prior/option_agent/ODPP.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, rnn_decoder, prior
from utils.summary_tools import write_summary
from spectral_DPP_agent.laprepr import LapReprLearner
from spectral... | 20,260 | 52.885638 | 164 | py |
ODPP | ODPP-main/ODPP_Prior/option_agent/DIAYN.py | import os
import torch
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from learner import policy, value, decoder
from utils.summary_tools import write_summary
from option_agent.base_option_agent import Option_Agent
class DIAYN_Agent(Op... | 10,567 | 53.756477 | 183 | py |
ODPP | ODPP-main/ODPP_Prior/utils/buffer.py | import torch as th
import numpy as np
from types import SimpleNamespace as SN
class OneHot(object):
def __init__(self, out_dim):
self.out_dim = out_dim
def transform(self, tensor): # check
y_onehot = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
y_onehot.scatter_(-1, tensor.lon... | 7,036 | 40.394118 | 159 | py |
ODPP | ODPP-main/ODPP_Prior/utils/torch_tools.py | import numpy as np
import torch
def to_tensor(x, device):
"""return a torch.Tensor, assume x is an np array."""
if x.dtype in [np.float32, np.float64]:
return torch.tensor(x, dtype=torch.float32, device=device)
elif x.dtype in [np.int32, np.int64, np.uint8]:
return torch.tensor(x, dtype=tor... | 421 | 37.363636 | 66 | py |
ODPP | ODPP-main/ODPP_Prior/utils/summary_tools.py | import numpy as np
def get_summary_str(step=None, info=None, prefix=''):
summary_str = prefix
if step is not None:
summary_str += 'Step {}; '.format(step)
for key, val in info.items():
if isinstance(val, (int, np.int32, np.int64)):
summary_str += '{} {}; '.format(key, val)
... | 834 | 35.304348 | 85 | py |
ODPP | ODPP-main/ODPP_Prior/learner/value.py | import torch.nn as nn
from learner.base_mlp import MLP
# TODO: share layer with the policy netwrok
class ValueFuntion(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(ValueFuntion, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, 1])
def forward(self, x):... | 385 | 28.692308 | 69 | py |
ODPP | ODPP-main/ODPP_Prior/learner/base_mlp.py | import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, layers, activation=torch.tanh, output_activation=None, init=True): # TODO:relu
super(MLP, self).__init__()
self.layers = nn.ModuleList()
self.activation = activation
self.output_activation = output_activatio... | 1,057 | 34.266667 | 101 | py |
ODPP | ODPP-main/ODPP_Prior/learner/rnn_decoder.py | import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
class RNN_Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(RNN_Decoder, self).__init__()
self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim, batch_first=True, bidir... | 1,253 | 40.8 | 111 | py |
ODPP | ODPP-main/ODPP_Prior/learner/prior.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Prior(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim, is_high=False):
super(Prior, self).__init__()
self.is_high = is_high
self.mlp = MLP(layers=[input_dim,... | 1,225 | 32.135135 | 91 | py |
ODPP | ODPP-main/ODPP_Prior/learner/policy.py | import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
import numpy as np
from learner.base_mlp import MLP
class GaussianPolicy(nn.Module):
def __init__(self, input_dim, hidden_dim, action_dim, output_activation=None, act_range=None... | 1,980 | 36.377358 | 153 | py |
ODPP | ODPP-main/ODPP_Prior/learner/decoder.py | import torch.nn as nn
from torch.distributions.categorical import Categorical
from learner.base_mlp import MLP
class Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, code_dim):
super(Decoder, self).__init__()
self.mlp = MLP(layers=[input_dim, hidden_dim, hidden_dim, code_dim])
de... | 861 | 29.785714 | 76 | py |
ODPP | ODPP-main/ODPP_Prior/spectral_DPP_agent/laprepr.py | import os
import collections
import numpy as np
from tqdm import tqdm
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from configs import get_laprepr_args
from utils import torch_tools, timer_tools, summary_tools
from spectral_DPP_agent.spectral_buffer import EpisodicReplayBuffer... | 13,542 | 40.039394 | 132 | py |