python_code (string column, lengths 0 to 456k)
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.auto_parallel.tensor_shard.node_handler import BinaryElementwiseHandler from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVect...
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.auto_parallel.tensor_shard.node_handler.embedding_handler import ( EmbeddingFunctionHandler, EmbeddingModuleHandler, ) from colossalai.auto_parallel.tensor_shard.sharding_strategy ...
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.auto_parallel.tensor_shard.node_handler import BMMFunctionHandler from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector fro...
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.auto_parallel.tensor_shard.node_handler.default_reshape_handler import DefaultReshapeHandler from colossalai.auto_parallel.tensor_shard.node_handler.getitem_handler import GetItemHandler f...
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler, ConvModuleHandler from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, Operat...
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.auto_parallel.tensor_shard.node_handler.layer_norm_handler import LayerNormModuleHandler from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataTyp...
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.auto_parallel.tensor_shard.node_handler import BMMFunctionHandler from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector fro...
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.auto_parallel.tensor_shard.node_handler.batch_norm_handler import BatchNormModuleHandler from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataTyp...
from typing import Optional, Tuple, Union import torch import torch.nn as nn from transformers.activations import ACT2FN from transformers.models.gpt2.modeling_gpt2 import BaseModelOutputWithPastAndCrossAttentions, GPT2PreTrainedModel from transformers.pytorch_utils import Conv1D class GPT2MLP(nn.Module): def _...
import torch import torch.nn as nn import transformers from torch.fx import GraphModule from colossalai.auto_parallel.tensor_shard.constants import BATCHNORM_MODULE_OP from colossalai.auto_parallel.tensor_shard.options import SolverOptions from colossalai.auto_parallel.tensor_shard.solver import CostGraph, GraphAnalys...
import copy import random from functools import partial from typing import Dict import numpy as np import pytest import torch import torch.multiprocessing as mp import transformers from torch.fx import GraphModule from colossalai.auto_parallel.tensor_shard.initialize import ( ModuleWrapper, build_strategy_con...
import torch import torch.nn.functional as F from colossalai.auto_parallel.passes.runtime_preparation_pass import node_args_converting_pass from colossalai.device.device_mesh import DeviceMesh from colossalai.fx.graph_module import ColoGraphModule from colossalai.fx.tracer import ColoTracer from colossalai.tensor.shar...
import torch import torch.nn.functional as F from colossalai.auto_parallel.passes.runtime_preparation_pass import size_value_converting_pass from colossalai.device.device_mesh import DeviceMesh from colossalai.fx.graph_module import ColoGraphModule from colossalai.fx.tracer import ColoTracer from colossalai.tensor.sha...
import pytest from functools import partial import numpy as np import random import torch import torch.multiprocessing as mp import colossalai from colossalai.utils import free_port from colossalai.testing import rerun_if_address_is_in_use from colossalai.tensor import ColoParameter, ProcessGroup, ShardSpec, Compute...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from functools import partial import pytest import torch import torch.multiprocessing as mp from checks_1d.check_layer_1d import * from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.logging import disable_existing...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch

DEPTH = 4
BATCH_SIZE = 8
SEQ_LENGTH = 8
IMG_SIZE = 16
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16


def check_equal(A, B):
    assert torch.allclose(A, B, rtol=1e-3, atol=1e-1)
import torch import torch.distributed as dist from torch.nn import Parameter from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.global_variables import tensor_parallel_env as env from colossalai.nn import ( Classifier1D, Embedding1D, ...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from functools import partial import pytest import torch import torch.multiprocessing as mp from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from colossalai.utils import fr...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch

DEPTH = 2
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
IMG_SIZE = 16


def check_equal(A, B):
    assert torch.allclose(A, B, rtol=1e-3, atol=1e-2)
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.nn.layer.parallel_2d._operation import Matmul_AB_2D, Matmul_ABT_2D, Matmul_ATB_2D from colossalai.utils import get_current_device fro...
import torch from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.nn import (Classifier2D, CrossEntropyLoss2D, Embedding2D, LayerNorm2D, Linear2D, PatchEmbedding2D, VanillaClassifier, VanillaPatchEmbedding, VocabParallelCl...
import colossalai import colossalai.nn as col_nn import torch import torch.distributed as dist import torch.multiprocessing as mp import pytest from colossalai.core import global_context as gpc from colossalai.context import ParallelMode from colossalai.testing import rerun_if_address_is_in_use from functools import p...
import torch from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.nn import TransformerSelfAttentionRing from colossalai.utils import get_current_device def check_selfattention(): WORLD_SIZE = gpc.get_world_size(ParallelMode.SEQUENCE) SUB_SEQ_LENGTH = ...
from functools import partial import pytest import torch import torch.multiprocessing as mp from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from colossalai.utils import free_port from colossalai.testing import rerun_if_a...
import torch from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.nn import (Classifier2p5D, CrossEntropyLoss2p5D, Embedding2p5D, LayerNorm2p5D, Linear2p5D, PatchEmbedding2p5D, VanillaClassifier, VanillaPatchEmbedding, Voc...
import torch from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.nn.layer.parallel_2p5d._operation import Matmul_AB_2p5D, Matmul_ABT_2p5D, \ Matmul_ATB_2p5D from colossalai.utils import get_current_device from colossalai.utils import print_rank_0 from .comm...
import torch

TESSERACT_DIM = 2
TESSERACT_DEP = 2
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
IMG_SIZE = 16


def check_equal(A, B):
    assert torch.allclose(A, B, rtol=1e-5, atol=1e-2)
#!/usr/bin/env python # -*- encoding: utf-8 -*- from functools import partial import pytest import torch import torch.multiprocessing as mp from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from colossalai.utils import fre...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch

DEPTH = 2
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
NUM_BLOCKS = 2
IMG_SIZE = 16
VOCAB_SIZE = 16


def check_equal(A, B):
    eq = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert eq, f"\nA = {A}\nB = {B}"
    return eq
#!/usr/bin/env python # -*- encoding: utf-8 -*- import time import torch from colossalai.constants import INPUT_GROUP_3D, OUTPUT_GROUP_3D, WEIGHT_GROUP_3D from colossalai.core import global_context from colossalai.logging import get_dist_logger from colossalai.nn import ( Classifier3D, CrossEntropyLoss3D, ...
import pytest import torch from colossalai.gemini.stateful_tensor import TensorState, StatefulTensor @pytest.mark.dist def test_gemini_manager(): # reset the manager, in case that there exists memory information left manager = StatefulTensor.GST_MGR manager.reset() # occupation 8 st1 = StatefulT...
import copy import torch from colossalai.gemini.paramhooks import BaseParamHookMgr from tests.components_to_test.registry import non_distributed_component_funcs def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose=False) -> bool: if loose: return torch.allclose(tensor_a, tensor_b, atol=1e-...
from copy import deepcopy import numpy as np import torch from colossalai.gemini.memory_tracer.runtime_mem_tracer import RuntimeMemTracer from colossalai.utils.model.colo_init_context import ColoInitContext from tests.components_to_test import run_fwd_bwd from tests.components_to_test.registry import non_distributed_...
from functools import partial from typing import Callable import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing import assert_close import colossalai from colossalai.amp import convert_to_apex_amp ...
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp import colossalai from colossalai.gemini.chunk import init_chunk_manager, search_chunk_configuration from colossalai.tensor import ComputePattern, ComputeSpec, ProcessGroup, ShardSpec from colo...
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp import colossalai from colossalai.gemini import TensorState from colossalai.gemini.chunk import Chunk from colossalai.tensor import ColoParameter from colossalai.tensor import ProcessGroup as C...
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing import assert_close import colossalai from colossalai.amp import convert_to_apex_amp from colossalai.gemini.chunk...
from functools import partial import pytest import torch import torch.multiprocessing as mp from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing import assert_close import colossalai from colossalai.amp import convert_to_apex_amp from colossalai.gemini.chunk import ChunkManager, search_chun...
from functools import partial from time import time import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing import assert_close import colossalai from colossalai.amp import convert_to_apex_amp from c...
import os from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.nn.parallel import GeminiDDP from colossalai.nn.parallel.utils import get_static_torch_model from colossalai.tensor import ColoParameter from colossalai.testing import parameterize, ...
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.testing import assert_close import colossalai from colossalai.gemini.chunk import ChunkManager, search_chunk_configuration from colossalai.gemini.gemini_mgr import GeminiManager from...
from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.gemini.chunk import ChunkManager from colossalai.tensor import ColoTensor, ColoTensorSpec, ProcessGroup from colossalai.testing import parameterize, rerun_if_address_is_in_use from colossalai....
from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.gemini.chunk import ChunkManager, search_chunk_configuration from colossalai.gemini.gemini_mgr import GeminiManager from colossalai.gemini.memory_tracer.runtime_mem_tracer import RuntimeMemTra...
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp import colossalai from colossalai.gemini.chunk import ChunkManager, search_chunk_configuration from colossalai.gemini.gemini_mgr import GeminiManager from colossalai.nn.optimizer import HybridA...
#!/usr/bin/env python # -*- encoding: utf-8 -*- train_data = dict( dataset=dict( type='CIFAR10Dataset', root='/path/to/data', download=True, transform_pipeline=[ dict(type='RandomResizedCrop', size=224), dict(type='RandomHorizontalFlip'), dict(typ...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from pathlib import Path import pytest from colossalai.context.config import Config @pytest.mark.cpu def test_load_config(): filename = Path(__file__).parent.joinpath('sample_config.py') config = Config.from_file(filename) assert config.train_data, 'cann...
import torch from functools import partial import pytest import torch.distributed as dist import torch.multiprocessing as mp from torch.distributed import ReduceOp from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.utils import free_port from colossalai.testing i...
from functools import partial import pytest import torch.multiprocessing as mp from colossalai.device import AlphaBetaProfiler from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from colossalai.testing import parameterize, rerun_if_address_is_in_use from colossalai.utils ...
from functools import partial import pytest import torch.multiprocessing as mp from colossalai.device import AlphaBetaProfiler from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from colossalai.testing import parameterize, rerun_if_address_is_in_use from colossalai.utils ...
from functools import partial import pytest import torch.multiprocessing as mp from colossalai.device import AlphaBetaProfiler from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from colossalai.testing import parameterize, rerun_if_address_is_in_use from colossalai.utils ...
from colossalai.device.device_mesh import DeviceMesh import torch def test_device_mesh(): physical_mesh_id = torch.arange(0, 16).reshape(2, 8) mesh_shape = (4, 4) # [[0, 1, 2, 3], # [4, 5, 6, 7], # [8, 9, 10,11], # [12,13,14,15]] device_mesh = DeviceMesh(physical_mesh_id, mesh_shape) ...
import torch import pytest import os import torch.multiprocessing as mp import torch.distributed.rpc as rpc from torch import nn from torch._C._distributed_rpc import _is_current_rpc_agent_set from colossalai import launch from colossalai.logging import disable_existing_loggers from colossalai.pipeline.pipeline_proces...
import torch import torch.multiprocessing as mp from colossalai.pipeline.pipelinable import PipelinableContext from colossalai.testing import rerun_on_exception NUM_CHUNKS = 1 PIPELINE_SIZE = 2 class MLP(torch.nn.Module): def __init__(self, dim: int = 256): super().__init__() intermediate_dim ...
import torch from torch import nn from torch import autograd from colossalai.pipeline.rpc._pipeline_schedule import FillDrainPipelineEngine, OneFOneBPipelineEngine from colossalai.testing import assert_close from rpc_test_utils import rpc_run, parse_args, RpcTestModel feat_num = 100 h = 100 def partition(pp_rank: i...
import argparse import os import warnings import torch import torch.distributed as dist import torch.distributed.rpc as rpc import torch.multiprocessing as mp from colossalai import launch from colossalai.logging import disable_existing_loggers from colossalai.pipeline.pipeline_process_group import ppg from torch impo...
import os import torch.distributed.rpc as rpc import torch.multiprocessing as mp import pytest from colossalai.pipeline.pipeline_process_group import ppg from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from rpc_test_utils import pg_parse_args, rpc_is_initialized def ...
import torch from torch import nn from colossalai.pipeline.rpc._pipeline_schedule import FillDrainPipelineEngine, OneFOneBPipelineEngine from rpc_test_utils import rpc_run, parse_args, RpcTestModel # global variable for model created feat_num = 100 h = 100 def partition(pp_rank: int, chunk: int, stage_num: int): ...
import torch from torch import nn import torch.autograd as autograd from colossalai.pipeline.rpc import ChimeraPipelineEngine from colossalai.testing import assert_close from rpc_test_utils import rpc_run, parse_args, RpcTestModel # global variable for model created feat_num = 100 h = 100 def partition(pp_rank: int...
import os from typing import Callable, List, Optional, Type, Union import time import pytest import torch import torch.nn as nn from titans.dataloader.cifar10 import build_cifar from torchvision.models import resnet50 from torchvision.models.resnet import BasicBlock, Bottleneck, conv1x1 from tqdm import tqdm from rpc...
import torch from torch import nn from torch import autograd from torch.optim import SGD, Adam, RMSprop, Optimizer from colossalai.pipeline.rpc._pipeline_schedule import FillDrainPipelineEngine, OneFOneBPipelineEngine from colossalai.testing import assert_close from rpc_test_utils import rpc_run, parse_args, RpcTestMo...
from functools import partial import pytest import torch import torch.multiprocessing as mp from torch.nn.parallel import DistributedDataParallel as DDP import colossalai from colossalai.amp import convert_to_apex_amp from colossalai.gemini.chunk import search_chunk_configuration from colossalai.nn.optimizer.gemini_o...
from functools import partial import pytest import torch import torch.multiprocessing as mp import torch.nn.functional as F import colossalai from colossalai.device.device_mesh import DeviceMesh from colossalai.nn._ops._utils import gather_forward_split_backward from colossalai.tensor import ColoParameter, ColoTensor...
from colossalai.tensor import ColoParameter, ColoTensor, ColoTensorSpec, ProcessGroup import torch import pytest from common_utils import tensor_equal import colossalai from colossalai.utils import free_port @pytest.mark.skip def test_multiinheritance(): colossalai.launch(config={}, rank=0, world_size=1, host='lo...
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.distributed import ReduceOp from colossalai.core import global_context as gpc from colossalai.device.device_mesh import DeviceMesh from colossalai.initialize import launch from colos...
from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.tensor import ( ColoParameter, ColoTensorSpec, ComputePattern, ComputeSpec, ProcessGroup, ReplicaSpec, ShardSpec, ) from colossalai.testing import parameterize, rer...
from functools import partial import pytest import torch import torch.multiprocessing as mp from colossalai.core import global_context as gpc from colossalai.device.device_mesh import DeviceMesh from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from colossalai.tensor.sha...
from functools import partial import pytest import torch import torch.multiprocessing as mp from colossalai.device.device_mesh import DeviceMesh from colossalai.initialize import launch from colossalai.logging import disable_existing_loggers from colossalai.tensor.shape_consistency import CollectiveCommPattern, Shape...
import torch import pytest from functools import partial import torch.multiprocessing as mp import torch.distributed as dist import colossalai from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils.cuda import get_current_device from colossalai.utils import free_port from colossalai.tensor im...
import torch from colossalai.device.device_mesh import DeviceMesh from colossalai.tensor.sharding_spec import ShardingSpec, _DimSpec def test_sharding_spec(): physical_mesh_id = torch.arange(0, 16).reshape(2, 8) mesh_shape = (4, 4) # [[0, 1, 2, 3], # [4, 5, 6, 7], # [8, 9, 10,11], # [12,13...
from colossalai.tensor.shape_consistency import ShapeConsistencyManager, CollectiveCommPattern import torch from colossalai.tensor.sharding_spec import _DimSpec, ShardingSpec from colossalai.device.device_mesh import DeviceMesh physical_mesh_id = torch.arange(0, 16).reshape(2, 8) mesh_shape = (4, 4) # [[0, 1, 2, 3], #...
from ._utils import *
import os import random import numpy as np import torch import torch.distributed as dist from torch.testing import assert_close from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.tensor import ComputePattern, ComputeSpec, ShardSpec def set_seed(seed): r...
import torch import pytest from colossalai.tensor import ColoTensor from numpy import allclose import colossalai from colossalai.utils import free_port from colossalai.tensor import ColoTensorSpec from colossalai.core import global_context as gpc import torch.multiprocessing as mp from colossalai.testing import rerun_...
import math import torch import torch.distributed as dist import pytest import colossalai import torch.multiprocessing as mp from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import free_port from colossalai.tensor import DistSpecManager, ProcessGroup, ShardSpec, ReplicaSpec from functools...
from functools import partial import pytest import torch import torch.multiprocessing as mp from torch.nn.parallel import DistributedDataParallel as DDP import colossalai from colossalai.nn.parallel.data_parallel import ColoDDP from colossalai.tensor import ColoTensor, ColoTensorSpec, ComputePattern, ComputeSpec, Pro...
from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.nn.optimizer import ColossalaiOptimizer from colossalai.tensor import ColoTensor, ProcessGroup from colossalai.tensor.colo_parameter import ColoParameter from colossalai.testing import rerun_i...
from copy import deepcopy from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.nn.parallel.layers import check_colo_module, init_colo_module from colossalai.tensor import ( ColoTensor, ColoTensorSpec, ComputePattern, ComputeSpec,...
import copy from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.amp import convert_to_apex_amp, convert_to_naive_amp from colossalai.testing import assert_close_loose, rerun_if_address_is_in_use from colossalai.utils import free_port from tests...
import copy from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.amp import convert_to_apex_amp, convert_to_torch_amp from colossalai.testing import assert_close_loose, rerun_if_address_is_in_use from colossalai.utils import free_port from tests...
import os from functools import partial from pathlib import Path import colossalai import pytest import torch import torch.multiprocessing as mp from colossalai.amp import AMP_TYPE from colossalai.trainer import Trainer, hooks from colossalai.context import ParallelMode from colossalai.testing import rerun_if_address...
import os from functools import partial from pathlib import Path import colossalai import pytest import torch import torch.multiprocessing as mp from colossalai.amp import AMP_TYPE from colossalai.trainer import Trainer, hooks from colossalai.context import ParallelMode from colossalai.testing import rerun_if_address...
#!/usr/bin/env python # -*- encoding: utf-8 -*- from functools import partial import pytest import torch import torch.multiprocessing as mp from colossalai.core import global_context as gpc from colossalai.logging import disable_existing_loggers from colossalai.initialize import launch from colossalai.utils import fr...
import colossalai import torch from colossalai.fx.passes.utils import get_leaf, get_top, assign_bfs_level_to_nodes from colossalai.fx import ColoTracer from torch.fx import GraphModule from colossalai.fx.passes.meta_info_prop import MetaInfoProp, TensorMetadata class MLP(torch.nn.Module): def __init__(self, dim:...
import torch import torch.nn as nn from colossalai.fx.proxy import ColoProxy from colossalai.fx.tracer.tracer import ColoTracer from torch.fx import GraphModule import pytest class Conv1D(nn.Module): def __init__(self, nf, nx): super().__init__() self.nf = nf w = torch.empty(nx, nf) ...
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp import torch.nn as nn import colossalai from colossalai.fx import ColoTracer from colossalai.fx.passes.shard_1d_pass import transformer_mlp_pass from colossalai.tensor import ProcessGroup from ...
import torch from colossalai.fx._compatibility import is_compatible_with_meta from colossalai.fx.passes.meta_info_prop import MetaInfoProp, TensorMetadata from torch.fx import symbolic_trace if is_compatible_with_meta(): from colossalai.fx.profiler import MetaTensor BATCH_SIZE = 2 DIM_IN = 4 DIM_OUT = 16 def me...
import colossalai import colossalai.nn as col_nn import pytest import torch import torch.nn as nn from colossalai.fx._compatibility import is_compatible_with_meta from colossalai.fx.passes.adding_split_node_pass import (split_with_split_nodes_pass, uniform_split_pass) from colossalai.fx.passes.meta_info_prop import Met...
import torch import torch.nn as nn import colossalai import colossalai.nn as col_nn from torch.fx import symbolic_trace from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass, \ uniform_split_pass, balanced_split_p...
import copy import colossalai import pytest import torch import torch.fx import torch.multiprocessing as mp import torchvision.models as tm from colossalai.core import global_context as gpc from colossalai.fx import ColoGraphModule, ColoTracer from colossalai.fx._compatibility import is_compatible_with_meta from colos...
import pytest import torch import torchvision.models as tm from colossalai.fx import ColoTracer from colossalai.fx._compatibility import is_compatible_with_meta from colossalai.fx.graph_module import ColoGraphModule from colossalai.fx.passes.algorithms import linearize, solver_rotor from colossalai.fx.passes.algorithms...
import copy import re from typing import Callable import pytest import torch import torch.multiprocessing as mp import torchvision.models as tm from torch.fx import GraphModule import colossalai from colossalai.core import global_context as gpc from colossalai.fx import ColoTracer from colossalai.fx._compatibility im...
import torch from torch.nn import functional as F from colossalai.fx.tracer.meta_patch import patched_function def test_conv(): # test F.conv_1d data_1d = torch.rand(3, 16, 10) weight_1d = torch.rand(3, 16, 3) out_1d = F.conv1d(data_1d, weight_1d) patched_out_1d = patched_function.torch_nn_functio...
import torch import torch.nn as nn from torch.fx import GraphModule from torch.utils.checkpoint import checkpoint from colossalai.fx import ColoTracer class MLP(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(4, 4) self.linear2 = torch.nn.Linear(4,...
import torch from colossalai.fx import ColoGraphModule, ColoTracer class LinearModel(torch.nn.Module): def __init__(self, in_features, out_features): super().__init__() self.linear = torch.nn.Linear(in_features, out_features) def forward(self, x): x = self.linear(x) x = x * ...