| repo (string, 1–99 chars) | file (string, 13–215 chars) | code (string, 12–59.2M chars) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
GRACE | GRACE-master/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class LogReg(nn.Module):
    def __init__(self, ft_in, nb_classes):
        super(LogReg, self).__init__()
        self.fc = nn.Linear(ft_in, nb_classes)
        for m in self.modules():
            self.weights... | 4,147 | 31.155039 | 79 | py |
GRACE | GRACE-master/train.py | import argparse
import os.path as osp
import random
from time import perf_counter as t
import yaml
from yaml import SafeLoader
import torch
import torch_geometric.transforms as T
import torch.nn.functional as F
import torch.nn as nn
from torch_geometric.datasets import Planetoid, CitationFull
from torch_geometric.util... | 3,459 | 31.037037 | 78 | py |
datadeletion | datadeletion-master/data/process_cifar_data.py | import pickle
import numpy as np
import sys
import torch
from torch.utils.data import TensorDataset
import torchvision.transforms as transforms
import torchvision.models as models
if sys.platform == "darwin": # Apple
    import matplotlib
    matplotlib.use("TkAgg")
    from matplotlib import pyplot as plt
elif 'linux' in... | 7,575 | 34.568075 | 108 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/dynamics_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import gym
class LatentDynamicsModel(nn.Module):
    def __init__(self, latent_dim, n_skills):
        super().__init__()
        self.latent_dim = latent_dim
        self.n_skills = n_skills
        self.net_ =... | 4,566 | 36.130081 | 118 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/inverse_model.py | import torch
import torch.nn as nn
import gym
import torch.nn.functional as F
class InverseModel(nn.Module):
    def __init__(self, obs_space, n_skills, h_dim):
        super().__init__()
        assert "obs" in obs_space.keys() and "zone_obs" in obs_space.keys()
        self.obs_size = obs_space["obs"][0]
... | 1,128 | 30.361111 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/env_model.py | import torch
import torch.nn as nn
import gym
import torch.nn.functional as F
def getHiEnvEncoder(obs_space, h_dim):
if "zone_obs" in obs_space:
return ZoneEnvModel(obs_space, h_dim)
else:
raise NotImplementedError()
def getLoEnvEncoder(obs_space, n_skills, h_dim):
if "skill" in obs_space... | 4,033 | 33.186441 | 126 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/policy_network.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
from gym.spaces import Box, Discrete
class PolicyNetwork(nn.Module):
    def __init__(self, in_dim, action_space, hiddens=[], scales=None, activation=nn.ReLU()):
        super().__init__()
... | 2,373 | 36.68254 | 92 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/flat_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_ac
from env_model import getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_param... | 2,205 | 30.514286 | 108 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/hier_policy_value_models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
import torch_ac
import gym
from env_model import getLoEnvEncoder, getHiEnvEncoder, getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-ac... | 2,683 | 34.315789 | 174 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/format.py | import torch
def default_preprocess_obss(obss, device=None):
    return torch.tensor(obss, device=device) | 106 | 25.75 | 47 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/model.py | from abc import abstractmethod, abstractproperty
import torch.nn as nn
import torch.nn.functional as F
class ACModel:
    recurrent = False
    @abstractmethod
    def __init__(self, obs_space, action_space):
        pass
    @abstractmethod
    def forward(self, obs):
        pass
class RecurrentACModel(ACModel):
... | 485 | 17.692308 | 48 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/__init__.py | from torch_ac.algos import A2CAlgo, PPOAlgo
from torch_ac.model import ACModel, RecurrentACModel
from torch_ac.torch_utils import DictList | 138 | 45.333333 | 52 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/torch_utils/__init__.py | from torch_ac.torch_utils.dictlist import DictList
from torch_ac.torch_utils.penv import ParallelEnv | 100 | 49.5 | 50 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/algos/base.py | from abc import ABC, abstractmethod
import torch
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
class BaseAlgo(ABC):
"""The base class for RL algorithms."""
def __init__(self, envs, acmodel, device, num_frames_per_proc, discount,... | 10,510 | 40.38189 | 114 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/algos/hier_ppo.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.hier_base import HierBaseAlgo
class HierPPOAlgo(HierBaseAlgo):
"""The Proximal Policy Optimization algorithm
([Schulman et al., 2015](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, hi_acmodel, lo_acmodel, devic... | 12,252 | 39.572848 | 148 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/algos/a2c.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.base import BaseAlgo
class A2CAlgo(BaseAlgo):
"""The Advantage Actor-Critic algorithm."""
def __init__(self, envs, acmodel, device=None, num_frames_per_proc=None, discount=0.99, lr=0.01, gae_lambda=0.95,
entropy_co... | 3,659 | 31.972973 | 117 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/algos/ppo.py | import numpy
import torch
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
from torch_ac.algos.base import BaseAlgo
class PPOAlgo(BaseAlgo):
"""The Proximal Policy Optimization algorithm
([Schulman et al., 2015](https://arxiv.org/abs/1707.06347))."""
def __init__(self, e... | 7,578 | 40.190217 | 146 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/algos/hier_base.py | from abc import ABC, abstractmethod
import torch
from envs.wrappers import WaitWrapper
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
# PPO with a high level and low level policy. The high level policy only takes a step every `skill_len` s... | 15,578 | 45.924699 | 141 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/algos/_hier_policy_opt.py | import torch, torch.nn.functional as F
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
import pdb
import copy
import random
## Samples the environment using the policy. Uses the standard RL framework where the policy observes each observation.
def collect_experiences(self):
    # Collect ex... | 16,788 | 38.503529 | 156 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/algos/__init__.py | from torch_ac.algos.a2c import A2CAlgo
from torch_ac.algos.ppo import PPOAlgo
from torch_ac.algos.hier_ppo import HierPPOAlgo
from torch_ac.algos.hrl_policy_planner import HierPolicyAlgo | 187 | 36.6 | 60 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/torch_ac/algos/hrl_policy_planner.py | import torch
from envs.wrappers import WaitWrapper
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
# PPO with a high level and low level policy. The high level policy only takes a step every `skill_len` steps.
class HierPolicyAlgo:
"""T... | 5,234 | 41.217742 | 133 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/utils/storage.py | import csv
import os
import torch
import logging
import sys
import utils
def create_folders_if_necessary(path):
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
def get_storage_dir():
    if "RL_STORAGE" in os.environ:
        return os.environ["RL_STORAGE"]
    r... | 2,148 | 22.877778 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/utils/format.py | import os
import json
import numpy as np
import re
import torch
import torch_ac
import gym
import utils
def get_obss_preprocessor(obs_space):
    # # LidarEnv-v0
    if isinstance(obs_space, gym.spaces.Box):
        obs_space = {'obs': obs_space.shape}
    def preprocess_obss(obss, device=None):
        ret... | 5,197 | 41.606557 | 172 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/utils/agent.py | import torch
import utils
from flat_model import ACModel
class Agent:
"""An agent.
It is able:
- to choose an action given an observation,
- to analyze the feedback (i.e. reward and done state) of its action."""
def __init__(self, obs_space, action_space, model_dir,
device=None... | 2,002 | 34.767857 | 117 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/utils/hier_agent.py | import torch
import torch.nn.functional as F
import utils
from hier_policy_value_models import HighPolicyValueModel, LoPolicyValueModel
from dynamics_model import LatentDynamicsModel
from env_model import getHiEnvEncoder
class HierAgent:
"""An agent.
It is able:
- to choose an action given an observation... | 1,973 | 34.890909 | 105 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/utils/__init__.py | from torch_ac.torch_utils.dictlist import DictList
from torch_ac.torch_utils.penv import ParallelEnv
from .agent import *
from .hier_agent import *
from .format import *
from .other import *
from .storage import * | 215 | 23 | 50 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/src/utils/other.py | import random
import numpy
import torch
import collections
def seed(seed):
    random.seed(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def synthesize(array):
    d = collections.OrderedDict()
    d["mean"] = numpy.mean(arra... | 434 | 18.772727 | 40 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/scripts/measure_env_variance.py | import argparse
import time
import numpy
import torch
import pickle
import os
import utils
from envs.make_env import make_test_env, make_fixed_env
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of the environment to be run (REQUIRED)")
... | 2,010 | 26.175676 | 101 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/scripts/train_skill_planner.py | import argparse
import time
import datetime
import torch
import torch_ac
import wandb
import sys
import os
import utils
from envs.make_env import make_train_env
from hier_policy_value_models import HighPolicyValueModel, LoPolicyValueModel
from env_model import getHiEnvEncoder
from inverse_model import InverseModel
#... | 10,357 | 41.979253 | 172 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/scripts/evaluate_hier.py | import argparse
import time
import numpy
import torch
import os
import pickle
import utils
from envs.make_env import make_test_env, make_fixed_env
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of the environment to be run (REQUIRED)")
... | 2,431 | 26.022222 | 95 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/scripts/visualize_hier.py | import argparse
import time
import numpy as np
import torch
import torch.nn.functional as F
import utils
from envs.make_env import make_test_env
# from sequence.sequence_helper import *
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of t... | 2,718 | 27.925532 | 88 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/options/scripts/visualize.py | import argparse
import time
import numpy
import torch
import utils
from envs.make_env import make_test_env, make_fixed_env
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of the environment to be run (REQUIRED)")
parser.add_argument("--m... | 2,436 | 28.011905 | 101 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/dynamics_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import gym
class LatentDynamicsModel(nn.Module):
    def __init__(self, latent_dim, n_skills):
        super().__init__()
        self.latent_dim = latent_dim
        self.n_skills = n_skills
        self.net_ =... | 4,566 | 36.130081 | 118 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/inverse_model.py | import torch
import torch.nn as nn
import gym
import torch.nn.functional as F
class InverseModel(nn.Module):
    def __init__(self, obs_space, n_skills, h_dim):
        super().__init__()
        assert "obs" in obs_space.keys() and "zone_obs" in obs_space.keys()
        self.obs_size = obs_space["obs"][0]
... | 1,128 | 30.361111 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/env_model.py | import torch
import torch.nn as nn
import gym
import torch.nn.functional as F
def getHiEnvEncoder(obs_space, h_dim):
if "zone_obs" in obs_space:
return ZoneEnvModel(obs_space, h_dim)
else:
raise NotImplementedError()
def getLoEnvEncoder(obs_space, h_dim):
return ZoneEnvGoalModel(obs_space... | 3,785 | 33.108108 | 125 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/policy_network.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
from gym.spaces import Box, Discrete
class PolicyNetwork(nn.Module):
    def __init__(self, in_dim, action_space, hiddens=[], scales=None, activation=nn.ReLU()):
        super().__init__()
... | 2,370 | 36.634921 | 92 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/flat_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_ac
from env_model import getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_param... | 1,474 | 26.830189 | 108 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/hier_policy_value_models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
import torch_ac
import gym
from env_model import getLoEnvEncoder, getHiEnvEncoder, getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-ac... | 2,334 | 30.986301 | 123 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/format.py | import torch
def default_preprocess_obss(obss, device=None):
    return torch.tensor(obss, device=device) | 106 | 25.75 | 47 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/model.py | from abc import abstractmethod, abstractproperty
import torch.nn as nn
import torch.nn.functional as F
class ACModel:
    recurrent = False
    @abstractmethod
    def __init__(self, obs_space, action_space):
        pass
    @abstractmethod
    def forward(self, obs):
        pass
class RecurrentACModel(ACModel):
... | 485 | 17.692308 | 48 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/__init__.py | from torch_ac.algos import A2CAlgo, PPOAlgo
from torch_ac.model import ACModel, RecurrentACModel
from torch_ac.torch_utils import DictList | 138 | 45.333333 | 52 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/torch_utils/__init__.py | from torch_ac.torch_utils.dictlist import DictList
from torch_ac.torch_utils.penv import ParallelEnv | 100 | 49.5 | 50 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/algos/base.py | from abc import ABC, abstractmethod
import torch
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
class BaseAlgo(ABC):
"""The base class for RL algorithms."""
def __init__(self, envs, acmodel, device, num_frames_per_proc, discount,... | 10,274 | 40.59919 | 110 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/algos/hier_ppo.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.hier_base import HierBaseAlgo
class HierPPOAlgo(HierBaseAlgo):
"""The Proximal Policy Optimization algorithm
([Schulman et al., 2015](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, hi_acmodel, lo_acmodel, devic... | 12,252 | 39.572848 | 148 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/algos/a2c.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.base import BaseAlgo
class A2CAlgo(BaseAlgo):
"""The Advantage Actor-Critic algorithm."""
def __init__(self, envs, acmodel, device=None, num_frames_per_proc=None, discount=0.99, lr=0.01, gae_lambda=0.95,
entropy_co... | 3,659 | 31.972973 | 117 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/algos/ppo.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.base import BaseAlgo
class PPOAlgo(BaseAlgo):
"""The Proximal Policy Optimization algorithm
([Schulman et al., 2015](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, acmodel, device=None, num_frames_per_proc=None... | 6,407 | 38.555556 | 125 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/algos/hier_base.py | from abc import ABC, abstractmethod
import torch
from envs.wrappers import WaitWrapper
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
# PPO with a high level and low level policy. The high level policy only takes a step every `skill_len` s... | 15,578 | 45.924699 | 141 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/algos/_hier_policy_opt.py | import torch, torch.nn.functional as F
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
import pdb
import copy
import random
## Samples the environment using the policy. Uses the standard RL framework where the policy observes each observation.
def collect_experiences(self):
    # Reset the e... | 16,502 | 39.647783 | 156 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/algos/__init__.py | from torch_ac.algos.a2c import A2CAlgo
from torch_ac.algos.ppo import PPOAlgo
from torch_ac.algos.hier_ppo import HierPPOAlgo
from torch_ac.algos.hrl_policy_planner import HierPolicyAlgo | 187 | 36.6 | 60 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/torch_ac/algos/hrl_policy_planner.py | import torch
from envs.wrappers import WaitWrapper
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
# PPO with a high level and low level policy. The high level policy only takes a step every `skill_len` steps.
class HierPolicyAlgo:
"""T... | 5,291 | 42.02439 | 133 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/utils/storage.py | import csv
import os
import torch
import logging
import sys
import utils
def create_folders_if_necessary(path):
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
def get_storage_dir():
    if "RL_STORAGE" in os.environ:
        return os.environ["RL_STORAGE"]
    r... | 2,148 | 22.877778 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/utils/format.py | import os
import json
import numpy as np
import re
import torch
import torch_ac
import gym
import utils
def get_obss_preprocessor(obs_space):
    # # LidarEnv-v0
    if isinstance(obs_space, gym.spaces.Box):
        obs_space = {'obs': obs_space.shape}
    def preprocess_obss(obss, device=None):
        ret... | 5,197 | 41.606557 | 172 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/utils/agent.py | import torch
import utils
from flat_model import ACModel
class Agent:
"""An agent.
It is able:
- to choose an action given an observation,
- to analyze the feedback (i.e. reward and done state) of its action."""
def __init__(self, obs_space, action_space, model_dir,
device=None... | 1,931 | 32.894737 | 100 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/utils/hier_agent.py | import torch
import torch.nn.functional as F
import utils
from hier_policy_value_models import HighPolicyValueModel, LoPolicyValueModel
from dynamics_model import LatentDynamicsModel
from env_model import getHiEnvEncoder
class HierAgent:
"""An agent.
It is able:
- to choose an action given an observation... | 1,905 | 34.296296 | 105 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/utils/__init__.py | from torch_ac.torch_utils.dictlist import DictList
from torch_ac.torch_utils.penv import ParallelEnv
from .agent import *
from .hier_agent import *
from .format import *
from .other import *
from .storage import * | 215 | 23 | 50 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/src/utils/other.py | import random
import numpy
import torch
import collections
def seed(seed):
    random.seed(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def synthesize(array):
    d = collections.OrderedDict()
    d["mean"] = numpy.mean(arra... | 434 | 18.772727 | 40 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/scripts/measure_env_variance.py | import argparse
import time
import numpy as np
import torch
import torch.nn.functional as F
import utils
from envs.make_env import make_fixed_env
# from sequence.sequence_helper import *
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of ... | 3,119 | 29.588235 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/scripts/train_skill_planner.py | import argparse
import time
import datetime
import torch
import torch_ac
import wandb
import sys
import os
import utils
from envs.make_env import make_train_env
from hier_policy_value_models import HighPolicyValueModel, LoPolicyValueModel
from env_model import getHiEnvEncoder
# Parse arguments
parser = argparse.Arg... | 9,956 | 41.733906 | 172 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/scripts/evaluate_xy_hrl.py | import argparse
import time
import numpy
import torch
import os
import pickle
import utils
from envs.make_env import make_test_env, make_fixed_env
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of the environment to be run (REQUIRED)")
... | 2,287 | 25.298851 | 96 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/xy-goals/scripts/visualize_hier.py | import argparse
import time
import numpy as np
import torch
import torch.nn.functional as F
import utils
from envs.make_env import make_test_env
# from sequence.sequence_helper import *
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of t... | 2,302 | 26.416667 | 75 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/dynamics_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import gym
class LatentDynamicsModel(nn.Module):
    def __init__(self, latent_dim, n_skills):
        super().__init__()
        self.latent_dim = latent_dim
        self.n_skills = n_skills
        self.net_ =... | 4,566 | 36.130081 | 118 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/inverse_model.py | import torch
import torch.nn as nn
import gym
import torch.nn.functional as F
class InverseModel(nn.Module):
    def __init__(self, obs_space, n_skills, h_dim):
        super().__init__()
        assert "obs" in obs_space.keys() and "zone_obs" in obs_space.keys()
        self.obs_size = obs_space["obs"][0]
... | 1,128 | 30.361111 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/env_model.py | import torch
import torch.nn as nn
import gym
import torch.nn.functional as F
def getHiEnvEncoder(obs_space, h_dim):
if "zone_obs" in obs_space:
return ZoneEnvModel(obs_space, h_dim)
else:
raise NotImplementedError()
def getLoEnvEncoder(obs_space, goal_dim, h_dim):
if "goal" in obs_space:... | 4,021 | 33.376068 | 125 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/policy_network.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
from gym.spaces import Box, Discrete
class PolicyNetwork(nn.Module):
    def __init__(self, in_dim, action_space, hiddens=[], scales=None, activation=nn.ReLU()):
        super().__init__()
... | 2,373 | 36.68254 | 92 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/flat_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_ac
from env_model import getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_param... | 2,205 | 30.514286 | 108 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/hier_policy_value_models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
import torch_ac
import gym
from env_model import getLoEnvEncoder, getHiEnvEncoder, getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-ac... | 2,988 | 33.356322 | 142 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/format.py | import torch
def default_preprocess_obss(obss, device=None):
    return torch.tensor(obss, device=device) | 106 | 25.75 | 47 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/model.py | from abc import abstractmethod, abstractproperty
import torch.nn as nn
import torch.nn.functional as F
class ACModel:
    recurrent = False
    @abstractmethod
    def __init__(self, obs_space, action_space):
        pass
    @abstractmethod
    def forward(self, obs):
        pass
class RecurrentACModel(ACModel):
... | 485 | 17.692308 | 48 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/__init__.py | from torch_ac.algos import A2CAlgo, PPOAlgo
from torch_ac.model import ACModel, RecurrentACModel
from torch_ac.torch_utils import DictList | 138 | 45.333333 | 52 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/torch_utils/__init__.py | from torch_ac.torch_utils.dictlist import DictList
from torch_ac.torch_utils.penv import ParallelEnv | 100 | 49.5 | 50 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/algos/base.py | from abc import ABC, abstractmethod
import torch
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
class BaseAlgo(ABC):
"""The base class for RL algorithms."""
def __init__(self, envs, acmodel, device, num_frames_per_proc, discount,... | 10,354 | 40.923077 | 110 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/algos/hier_ppo.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.hier_base import HierBaseAlgo
class HierPPOAlgo(HierBaseAlgo):
"""The Proximal Policy Optimization algorithm
([Schulman et al., 2015](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, hi_acmodel, lo_acmodel, devic... | 12,252 | 39.572848 | 148 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/algos/a2c.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.base import BaseAlgo
class A2CAlgo(BaseAlgo):
"""The Advantage Actor-Critic algorithm."""
def __init__(self, envs, acmodel, device=None, num_frames_per_proc=None, discount=0.99, lr=0.01, gae_lambda=0.95,
entropy_co... | 3,659 | 31.972973 | 117 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/algos/ppo.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.base import BaseAlgo
class PPOAlgo(BaseAlgo):
"""The Proximal Policy Optimization algorithm
([Schulman et al., 2015](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, acmodel, device=None, num_frames_per_proc=None... | 6,407 | 38.555556 | 125 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/algos/hier_base.py | from abc import ABC, abstractmethod
import torch
from envs.wrappers import WaitWrapper
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
# PPO with a high level and low level policy. The high level policy only takes a step every `skill_len` s... | 15,578 | 45.924699 | 141 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/algos/_hier_policy_opt.py | import torch, torch.nn.functional as F
from torch_ac.torch_utils import DictList, ParallelEnv
from torch.distributions import Categorical
import numpy as np
import pdb
import copy
import random
def collect_experiences(self):
    for i in range(self.num_frames_per_proc):
        preprocessed_obs = self.preprocess_obss(... | 16,704 | 39.546117 | 156 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/algos/__init__.py | from torch_ac.algos.a2c import A2CAlgo
from torch_ac.algos.ppo import PPOAlgo
from torch_ac.algos.hier_ppo import HierPPOAlgo
from torch_ac.algos.hrl_policy_planner import HierPolicyAlgo | 187 | 36.6 | 60 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/torch_ac/algos/hrl_policy_planner.py | import torch
from envs.wrappers import WaitWrapper
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
# PPO with a high level and low level policy. The high level policy only takes a step every `skill_len` steps.
class HierPolicyAlgo:
"""T... | 5,708 | 42.915385 | 133 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/utils/storage.py | import csv
import os
import torch
import logging
import sys
import utils
def create_folders_if_necessary(path):
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
def get_storage_dir():
    if "RL_STORAGE" in os.environ:
        return os.environ["RL_STORAGE"]
    r... | 2,148 | 22.877778 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/utils/format.py | import os
import json
import numpy as np
import re
import torch
import torch_ac
import gym
import utils
def get_obss_preprocessor(obs_space):
    # # LidarEnv-v0
    if isinstance(obs_space, gym.spaces.Box):
        obs_space = {'obs': obs_space.shape}
    def preprocess_obss(obss, device=None):
        ret... | 5,197 | 41.606557 | 172 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/utils/agent.py | import torch
import utils
from flat_model import ACModel
class Agent:
"""An agent.
It is able:
- to choose an action given an observation,
- to analyze the feedback (i.e. reward and done state) of its action."""
def __init__(self, obs_space, action_space, model_dir,
device=None... | 1,931 | 32.894737 | 100 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/utils/hier_agent.py | import torch
import torch.nn.functional as F
import utils
from torch.distributions import Categorical
from hier_policy_value_models import HighPolicyValueModel, LoPolicyValueModel
from dynamics_model import LatentDynamicsModel
from env_model import getHiEnvEncoder
class HierAgent:
"""An agent.
It is able:
... | 2,074 | 35.403509 | 105 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/utils/__init__.py | from torch_ac.torch_utils.dictlist import DictList
from torch_ac.torch_utils.penv import ParallelEnv
from .agent import *
from .hier_agent import *
from .format import *
from .other import *
from .storage import * | 215 | 23 | 50 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/src/utils/other.py | import random
import numpy
import torch
import collections
def seed(seed):
    random.seed(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def synthesize(array):
    d = collections.OrderedDict()
    d["mean"] = numpy.mean(arra... | 434 | 18.772727 | 40 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/scripts/measure_env_variance.py | import argparse
import time
import numpy as np
import torch
import torch.nn.functional as F
import utils
from envs.make_env import make_fixed_env
# from sequence.sequence_helper import *
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of ... | 3,119 | 29.588235 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/scripts/train_skill_planner.py | import argparse
import time
import datetime
import torch
import torch_ac
import wandb
import sys
import os
import utils
from envs.make_env import make_train_env
from hier_policy_value_models import HighPolicyValueModel, LoPolicyValueModel
from env_model import getHiEnvEncoder
from inverse_model import InverseModel
#... | 9,894 | 42.021739 | 172 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/scripts/evaluate_zone_hrl.py | import argparse
import time
import numpy
import torch
import os
import pickle
import utils
from envs.make_env import make_test_env, make_fixed_env
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of the environment to be run (REQUIRED)")
... | 2,398 | 26.574713 | 119 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/zone-goals/scripts/visualize_hier.py | import argparse
import time
import numpy as np
import torch
import torch.nn.functional as F
import utils
from envs.make_env import make_test_env
# from sequence.sequence_helper import *
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of t... | 2,608 | 27.358696 | 119 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/dynamics_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import gym
class LatentDynamicsModel(nn.Module):
    def __init__(self, latent_dim, n_skills):
        super().__init__()
        self.latent_dim = latent_dim
        self.n_skills = n_skills
        self.net_ =... | 4,566 | 36.130081 | 118 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/inverse_model.py | import torch
import torch.nn as nn
import gym
import torch.nn.functional as F
class InverseModel(nn.Module):
    def __init__(self, obs_space, n_skills, h_dim):
        super().__init__()
        assert "obs" in obs_space.keys() and "zone_obs" in obs_space.keys()
        self.obs_size = obs_space["obs"][0]
... | 1,128 | 30.361111 | 102 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/env_model.py | import torch
import torch.nn as nn
import gym
import torch.nn.functional as F
def getHiEnvEncoder(obs_space, h_dim):
if "zone_obs" in obs_space:
return ZoneEnvModel(obs_space, h_dim)
else:
raise NotImplementedError()
def getLoEnvEncoder(obs_space, n_skills, h_dim):
if "skill" in obs_space... | 4,033 | 33.186441 | 126 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/policy_network.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
from gym.spaces import Box, Discrete
class PolicyNetwork(nn.Module):
    def __init__(self, in_dim, action_space, hiddens=[], scales=None, activation=nn.ReLU()):
        super().__init__()
... | 2,373 | 36.68254 | 92 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/flat_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_ac
from env_model import getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_param... | 2,205 | 30.514286 | 108 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/hier_policy_value_models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
import torch_ac
import gym
from env_model import getLoEnvEncoder, getHiEnvEncoder, getEnvModel
from policy_network import PolicyNetwork
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-ac... | 2,634 | 33.220779 | 125 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/torch_ac/format.py | import torch
def default_preprocess_obss(obss, device=None):
    return torch.tensor(obss, device=device) | 106 | 25.75 | 47 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/torch_ac/model.py | from abc import abstractmethod, abstractproperty
import torch.nn as nn
import torch.nn.functional as F
class ACModel:
    recurrent = False
    @abstractmethod
    def __init__(self, obs_space, action_space):
        pass
    @abstractmethod
    def forward(self, obs):
        pass
class RecurrentACModel(ACModel):
... | 485 | 17.692308 | 48 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/torch_ac/__init__.py | from torch_ac.algos import A2CAlgo, PPOAlgo
from torch_ac.model import ACModel, RecurrentACModel
from torch_ac.torch_utils import DictList | 138 | 45.333333 | 52 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/torch_ac/torch_utils/__init__.py | from torch_ac.torch_utils.dictlist import DictList
from torch_ac.torch_utils.penv import ParallelEnv | 100 | 49.5 | 50 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/torch_ac/algos/base.py | from abc import ABC, abstractmethod
import torch
from torch_ac.format import default_preprocess_obss
from torch_ac.torch_utils import DictList, ParallelEnv
import numpy as np
class BaseAlgo(ABC):
"""The base class for RL algorithms."""
def __init__(self, envs, acmodel, device, num_frames_per_proc, discount,... | 10,510 | 40.38189 | 114 | py |
combinatorial-rl-tasks | combinatorial-rl-tasks-master/main/src/torch_ac/algos/hier_ppo.py | import numpy
import torch
import torch.nn.functional as F
from torch_ac.algos.hier_base import HierBaseAlgo
class HierPPOAlgo(HierBaseAlgo):
"""The Proximal Policy Optimization algorithm
([Schulman et al., 2015](https://arxiv.org/abs/1707.06347))."""
def __init__(self, envs, hi_acmodel, lo_acmodel, devic... | 12,252 | 39.572848 | 148 | py |
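The three numeric columns are per-file statistics over the raw `code` string. A minimal sketch of how such values could be derived (the `file_stats` helper below is a hypothetical illustration, and treating `avg_line_length` as the mean of per-line character counts is an assumption, not the dataset's documented build pipeline):

```python
def file_stats(code: str):
    """Derive the table's numeric columns for one source file (illustrative only)."""
    line_lengths = [len(line) for line in code.splitlines()]
    file_length = len(code)                                          # `file_length`: total characters
    avg_line_length = sum(line_lengths) / max(len(line_lengths), 1)  # `avg_line_length` (assumed: mean line length)
    max_line_length = max(line_lengths, default=0)                   # `max_line_length`: longest line
    return file_length, avg_line_length, max_line_length
```

For example, `file_stats(open("model.py").read())` returns a triplet in the shape of the `| 4,147 | 31.155039 | 79 |` values above.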
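Several records repeat the comment that the high level policy "only takes a step every `skill_len` steps." A minimal sketch of that control flow, assuming hypothetical `env`, `hi_policy`, and `lo_policy` stand-ins rather than the repo's actual classes:

```python
def hierarchical_rollout(env, hi_policy, lo_policy, skill_len, n_steps):
    """Two-level rollout: the high level picks a skill every `skill_len`
    frames; the low level acts on every frame, conditioned on that skill."""
    obs = env.reset()
    skill = hi_policy(obs)              # initial high-level decision
    for t in range(1, n_steps + 1):
        action = lo_policy(obs, skill)  # low-level step on every frame
        obs, reward, done, info = env.step(action)
        if done:
            obs = env.reset()
        if done or t % skill_len == 0:  # high-level step every `skill_len` frames
            skill = hi_policy(obs)
```

Re-deciding the skill at episode boundaries is an assumption here; the records only establish the fixed `skill_len` cadence.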