Dataset schema (one record per source file):

    repo          string, 7 to 90 chars
    file_url      string, 81 to 315 chars
    file_path     string, 4 to 228 chars
    content       string, 0 to 32.8k chars
    language      string, 1 distinct value
    license       string, 7 distinct values
    commit_sha    string, 40 chars
    retrieved_at  date, 2026-01-04 14:38:15 to 2026-01-05 02:33:18
    truncated     bool, 2 distinct values

Every record in this excerpt carries the same values for five of these fields: repo is hpcaitech/ColossalAI, language is python, license is Apache-2.0, commit_sha is b1915d2889543949eb5b610241f1515c73df5059, and retrieved_at is 2026-01-04T14:40:19.002665Z. file_url always follows the pattern https://github.com/hpcaitech/ColossalAI/blob/<commit_sha>/<file_path>, and truncated is false except for the two records flagged inline below. Each record therefore lists the file path followed by its one-line content preview, elided with "..." in the source.
file: colossalai/legacy/zero/sharded_model/sharded_model_v2.py
# this code is inspired by the DeepSpeed library and implemented with our own design from scratch import functools import itertools from collections import OrderedDict from typing import Any, Iterator, Optional, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import Pro...

file: colossalai/legacy/zero/sharded_model/reduce_scatter.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. import functools import os from typing import Callable, Dict, List, Optional, Tuple import torch import torch.distributed as dist from torch ...

file: colossalai/legacy/zero/sharded_model/utils.py
import copy import torch from colossalai.legacy.zero.sharded_model import ShardedModelV2 def col_model_deepcopy(sharded_model: ShardedModelV2, other_model: torch.nn.Module): """ copy param of the ShardedModelV2 to other_model. Note the other_model has to be the same as self. """ for zero_param, ...
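The preview above ends at the loop header, so only the docstring's contract is certain: every parameter of the ShardedModelV2 is copied into a structurally identical replica. A minimal sketch of how such a copy typically has to work, gathering each shard before copying and re-sharding afterwards; the attribute names `colo_attr`, `sharded_data_tensor`, `data_payload`, and `shard_strategy` are assumptions, not confirmed by the preview:

```python
import copy

import torch


def col_model_deepcopy_sketch(sharded_model, other_model: torch.nn.Module) -> None:
    # Hypothetical body for the elided "for zero_param, ..." loop:
    # gather a sharded parameter to its full shape, deep-copy its payload
    # into the replica, then re-shard to release the gathered memory.
    for zero_param, param in zip(sharded_model.parameters(), other_model.parameters()):
        tensor = zero_param.colo_attr.sharded_data_tensor  # assumed attribute
        was_sharded = tensor.is_sharded
        if was_sharded:
            sharded_model.shard_strategy.gather([tensor])
        param.data = copy.deepcopy(zero_param.colo_attr.data_payload)
        if was_sharded:
            sharded_model.shard_strategy.shard([tensor])
```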

file: colossalai/legacy/zero/sharded_model/zero_hook.py
from typing import Optional import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.legacy.registry import OPHOOKS from colossalai.legacy.zero.gemini.ophooks import BaseOpHook from colossalai.legacy.zero.gemini.stateful_tensor import TensorState from colossalai...

file: colossalai/legacy/zero/sharded_model/__init__.py
from .sharded_model_v2 import ShardedModelV2 __all__ = ["ShardedModelV2"]

file: colossalai/legacy/zero/sharded_model/_utils.py
from typing import Any, Callable, List, Tuple, Union import torch import torch.nn.functional as F from colossalai.legacy.zero.gemini.stateful_tensor import StatefulTensor def get_gradient_predivide_factor(world_size: int) -> float: factor: int = 1 while world_size % factor == 0 and world_size / factor > fac...
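The preview cuts off inside the while condition of `get_gradient_predivide_factor`. A completion consistent with what is visible, and with the FSDP-style gradient pre-division the helper is named for, doubles the factor each iteration; the `factor *= 2` step is the assumption here. Gradients are then presumably divided by this factor before reduce-scatter and by `world_size / factor` afterwards, splitting the rescale to reduce fp16 over/underflow at scale.

```python
def get_gradient_predivide_factor(world_size: int) -> float:
    # Assumed completion of the elided loop body: keep doubling while the
    # factor still divides world_size and its square stays below world_size.
    # The result is roughly sqrt(world_size) rounded up to a power of two,
    # e.g. 1 -> 1, 4 -> 2, 8 -> 4, 64 -> 8, 128 -> 16.
    factor: int = 1
    while world_size % factor == 0 and world_size / factor > factor:
        factor *= 2
    return float(factor)
```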

file: colossalai/legacy/zero/gemini/stateful_tensor_mgr.py
import functools import types from time import time from typing import List from colossalai.accelerator import get_accelerator from .stateful_tensor import StatefulTensor, TensorState from .tensor_placement_policy import TensorPlacementPolicy from .tensor_utils import colo_model_data_tensor_move_inline class Statef...

file: colossalai/legacy/zero/gemini/gemini_context.py
from enum import EnumMeta class GeminiMemoryManager(object): def __init__(self, states_cls: EnumMeta): super().__init__() self.states_cls = states_cls self._cnter = 0 # the counter of instances self.total_mem = dict() self.state_mem = dict() self.state_mem["cpu"] ...

file: colossalai/legacy/zero/gemini/tensor_utils.py
from typing import Tuple, Union import torch from .stateful_tensor import StatefulTensor def is_storage_empty(tensor: torch.Tensor) -> bool: return tensor.storage().size() == 0 def free_storage(tensor: torch.Tensor) -> None: if not is_storage_empty(tensor): tensor.storage().resize_(0) def alloc_...
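`free_storage` is shown in full above, while the preview breaks off at "def alloc_...". A sketch of the natural counterpart, assuming it simply resizes the storage back to the tensor's element count:

```python
import torch


def alloc_storage(tensor: torch.Tensor) -> None:
    # Assumed mirror of free_storage: re-materialize the underlying
    # storage so the tensor can be written to again after being freed.
    if tensor.storage().size() == 0:  # i.e. is_storage_empty(tensor)
        tensor.storage().resize_(tensor.numel())
```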

file: colossalai/legacy/zero/gemini/tensor_placement_policy.py
import functools from abc import ABC, abstractmethod from time import time from typing import List, Optional, Type import torch from colossalai.accelerator import get_accelerator from colossalai.legacy.utils.memory import colo_device_memory_capacity from colossalai.zero.gemini.memory_tracer import MemStatsCollector ...

file: colossalai/legacy/zero/gemini/stateful_tensor.py
from enum import Enum from typing import Optional, Union import torch from .gemini_context import GeminiMemoryManager def sizeof_tensor(tensor: torch.Tensor): return tensor.numel() * tensor.element_size() class TensorState(Enum): FREE = 0 HOLD = 1 HOLD_AFTER_FWD = 2 HOLD_AFTER_BWD = 3 COMP...

file: colossalai/legacy/zero/gemini/__init__.py
from .colo_init_context import ColoInitContext, post_process_colo_init_ctx from .ophooks import BaseOpHook, register_ophooks_recursively from .stateful_tensor import StatefulTensor from .stateful_tensor_mgr import StatefulTensorMgr from .tensor_placement_policy import AutoTensorPlacementPolicy, CPUTensorPlacementPolicy...

file: colossalai/legacy/zero/gemini/colo_init_context.py
from typing import Any, Iterator, Optional, Tuple, Union import torch from torch import nn from colossalai.legacy.tensor import ProcessGroup from colossalai.tensor import ColoParameter, ColoTensor from colossalai.utils.model.utils import InsertPostInitMethodToModuleSubClasses # find named_params includes replica d...

file: colossalai/legacy/zero/gemini/ophooks/runtime_mem_tracer_hook.py
from contextlib import contextmanager from enum import Enum from functools import partial from typing import List import torch from colossalai.legacy.zero.gemini.tensor_utils import alloc_storage, free_storage from colossalai.tensor.param_op_hook import ColoParamOpHook from colossalai.zero.gemini.memory_tracer import...

file: colossalai/legacy/zero/gemini/ophooks/_shard_param_ophook.py
import torch from colossalai.legacy.registry import OPHOOKS from . import BaseOpHook @OPHOOKS.register_module class ShardParamHook(BaseOpHook): """ A hook to process sharded param before and after FWD and BWD operator executing. """ def __init__(self): super().__init__() def niter(self...

file: colossalai/legacy/zero/gemini/ophooks/utils.py
# this code is inspired by the DeepSpeed library and implemented with our own design from scratch from abc import ABC, abstractmethod from typing import Callable, List, Optional import torch class BaseOpHook(ABC): """This class allows users to add customized operations before and after the execution of a PyT...

file: colossalai/legacy/zero/gemini/ophooks/__init__.py
from .utils import BaseOpHook, register_ophooks_recursively __all__ = ["BaseOpHook", "register_ophooks_recursively"]

file: colossalai/legacy/zero/gemini/ophooks/_shard_grad_ophook.py
import torch from colossalai.legacy.registry import OPHOOKS from . import BaseOpHook @OPHOOKS.register_module class ShardGradMemTracerHook(BaseOpHook): """ A hook to process sharded param before and after FWD and BWD operator executing. """ def __init__(self): super().__init__() def pr...

file: colossalai/legacy/zero/gemini/paramhooks/_param_hookmgr.py
import functools from typing import Callable, List import torch class BaseParamHookMgr(object): def __init__(self, param_list: List[torch.nn.Parameter]) -> None: r""" register backward hook on every parameters of module """ self._param_list = param_list self._hook_list = [...
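The constructor preview stops at "self._hook_list = [". A minimal sketch of the manager it describes; the method names `register_backward_hooks` and `remove_hooks` are assumptions, but the mechanism, `Tensor.register_hook` on each leaf parameter plus the returned RemovableHandles, is the standard way to "register backward hook on every parameter":

```python
import functools
from typing import Callable, List

import torch


class BaseParamHookMgrSketch:
    def __init__(self, param_list: List[torch.nn.Parameter]) -> None:
        self._param_list = param_list
        self._hook_list = []  # RemovableHandle objects, kept for cleanup

    def register_backward_hooks(self, hook_call: Callable) -> None:
        # Leaf parameters (grad_fn is None) accept Tensor.register_hook;
        # the hook fires with the gradient each time it is computed.
        for p in self._param_list:
            if p.requires_grad and p.grad_fn is None:
                handle = p.register_hook(functools.partial(hook_call, p))
                self._hook_list.append(handle)

    def remove_hooks(self) -> None:
        for handle in self._hook_list:
            handle.remove()
        self._hook_list.clear()
```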

file: colossalai/legacy/zero/gemini/paramhooks/__init__.py
from ._param_hookmgr import BaseParamHookMgr __all__ = ["BaseParamHookMgr"]

file: colossalai/legacy/builder/__init__.py
from .builder import build_from_config, build_from_registry, build_gradient_handler __all__ = ["build_gradient_handler", "build_from_config", "build_from_registry"]

file: colossalai/legacy/builder/builder.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect from colossalai.legacy.registry import * def build_from_config(module, config: dict): """Returns an object of :class:`module` constructed from `config`. Args: module: A python or user-defined class config: A python dict containi...
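The docstring preview tells the whole story for this helper: it constructs `module` from a config dict. A plausible completion, assuming the dict is unpacked as keyword arguments:

```python
import inspect


def build_from_config(module, config: dict):
    # Assumed body: validate the class, then construct it from the dict.
    assert inspect.isclass(module), f"{module} is not a class"
    return module(**config)
```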

file: colossalai/legacy/communication/p2p_v2.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import io import pickle from typing import Any, List, Tuple, Union import torch import torch.distributed as dist from torch.distributed import ProcessGroupNCCL from torch.distributed import distributed_c10d as c10d from colossalai.legacy.context.parallel_mode import Pa...

file: colossalai/legacy/communication/utils.py
from typing import List, Tuple, Union import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc TensorShape = Union[torch.Size, List[int], Tuple[int]] def...

file: colossalai/legacy/communication/__init__.py
from .collective import all_gather, all_reduce, broadcast, reduce, reduce_scatter from .p2p import ( recv_backward, recv_forward, send_backward, send_backward_recv_backward, send_backward_recv_forward, send_forward, send_forward_backward_recv_forward_backward, send_forward_recv_backward,...

file: colossalai/legacy/communication/ring.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch from colossalai.accelerator import get_accelerator from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc def ring_forward(tensor_send_next: torch.Tensor, parallel_mode: ParallelMode) -> t...

file: colossalai/legacy/communication/p2p.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import operator from functools import reduce from typing import List, Tuple, Union import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.c...

file: colossalai/legacy/communication/collective.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ReduceOp from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc _all_gather_func = dist._all_gather_base if "all_ga...

file: colossalai/legacy/trainer/_trainer.py
from typing import Any, List, Union import torch from torch.utils.data import DataLoader from tqdm import tqdm from colossalai.legacy.engine import Engine from colossalai.legacy.trainer.hooks import BaseHook from colossalai.legacy.utils import is_dp_rank_0, is_no_pp_or_last_stage, is_tp_rank_0 from colossalai.logging...

file: colossalai/legacy/trainer/__init__.py
from ._trainer import Trainer __all__ = ["Trainer"]

file: colossalai/legacy/trainer/hooks/_log_hook.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import os import os.path as osp from typing import List from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.registry import HOOKS from colossalai.legacy.trainer.hooks._metric_hook import Thro...

file: colossalai/legacy/trainer/hooks/_commons_.py
import torch def _format_number(val, prec=5): if isinstance(val, float): return f"{val:.{prec}g}" elif torch.is_tensor(val) and torch.is_floating_point(val): return f"{val.item():.{prec}g}" return val
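This preview is one of the few that fits in full, so the helper's behavior can be shown directly: floats and floating-point tensors are rendered with five significant digits, everything else passes through untouched. A usage example for `_format_number` as defined above:

```python
import torch

assert _format_number(3.14159265) == "3.1416"                # float, 5 sig. digits
assert _format_number(torch.tensor(2.0 / 3.0)) == "0.66667"  # 0-dim float tensor
assert _format_number(42) == 42                              # ints fall through
assert _format_number("loss") == "loss"                      # so do strings
```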

file: colossalai/legacy/trainer/hooks/_checkpoint_hook.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch from colossalai.legacy.registry import HOOKS from colossalai.legacy.trainer.hooks import BaseHook from colossalai.legacy.utils.checkpointing import save_checkpoint from colossalai.logging import get_dist_logger from ._lr_scheduler_hook import LRSchedulerHoo...

file: colossalai/legacy/trainer/hooks/_lr_scheduler_hook.py
from torch import Tensor from colossalai.legacy.registry import HOOKS from ._metric_hook import LearningRateMetric, MetricHook @HOOKS.register_module class LRSchedulerHook(MetricHook): r"""Build LR scheduler for trainer. Args: lr_scheduler (:class:`colossalai.nn.lr_scheduler`): The specific LR sche...

file: colossalai/legacy/trainer/hooks/__init__.py
from ._base_hook import BaseHook from ._checkpoint_hook import SaveCheckpointHook from ._log_hook import ( LogMemoryByEpochHook, LogMetricByEpochHook, LogMetricByStepHook, LogTimingByEpochHook, TensorboardHook, ) from ._lr_scheduler_hook import LRSchedulerHook from ._metric_hook import AccuracyHook,...

file: colossalai/legacy/trainer/hooks/_metric_hook.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod from typing import Callable import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.legacy.communication import all_reduce from colossalai.legacy.context import ParallelMode fr...

file: colossalai/legacy/trainer/hooks/_base_hook.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC from torch import Tensor class BaseHook(ABC): """This class allows users to add desired actions in specific time points during training or evaluation. :param priority: Priority in the printing, hooks with small priority will be printed...

file: colossalai/legacy/amp/amp_type.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from enum import Enum class AMP_TYPE(Enum): APEX = "apex" TORCH = "torch" NAIVE = "naive"

file: colossalai/legacy/amp/__init__.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.nn as nn from torch.nn.modules.loss import _Loss from torch.optim import Optimizer from colossalai.context import Config from .amp_type import AMP_TYPE from .apex_amp import convert_to_apex_amp from .naive_amp import convert_to_naive_amp from .torch_amp im...

file: colossalai/legacy/amp/torch_amp/torch_amp.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.nn as nn from torch import Tensor from torch.nn.modules.loss import _Loss from torch.optim import Optimizer from colossalai.accelerator import get_accelerator from colossalai.interface import OptimizerWrapper from colossalai.legacy.utils import clip_grad_no...

file: colossalai/legacy/amp/torch_amp/__init__.py
from typing import Optional import torch.nn as nn from torch.nn.modules.loss import _Loss from torch.optim import Optimizer from colossalai.context import Config from .torch_amp import TorchAMPLoss, TorchAMPModel, TorchAMPOptimizer def convert_to_torch_amp( model: nn.Module, optimizer: Optimizer, criterion: Op...

file: colossalai/legacy/amp/torch_amp/_grad_scaler.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # modified from https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py # to support tensor parallel import warnings from collections import abc, defaultdict from enum import Enum from typing import Any, Dict, List, Optional, Tuple import torch impo...

file: colossalai/legacy/amp/apex_amp/__init__.py
import torch.nn as nn from torch.optim import Optimizer from .apex_amp import ApexAMPOptimizer def convert_to_apex_amp(model: nn.Module, optimizer: Optimizer, amp_config): r"""A helper function to wrap training components with Apex AMP modules Args: model (:class:`torch.nn.Module`): your model objec...

file: colossalai/legacy/amp/apex_amp/apex_amp.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.nn as nn try: import apex.amp as apex_amp except ImportError: pass from torch import Tensor from colossalai.interface import OptimizerWrapper from colossalai.legacy.utils import clip_grad_norm_fp32 class ApexAMPOptimizer(OptimizerWrapper): "...
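The `try: import apex.amp ... except ImportError: pass` guard shown above makes Apex optional at import time, but it leaves the name `apex_amp` undefined when Apex is missing, so a later use fails with a bare NameError. A sketch of the same pattern with an explicit fallback and a clearer failure; `backward_with_apex` is an illustrative helper, not the file's actual method:

```python
try:
    import apex.amp as apex_amp
except ImportError:
    apex_amp = None  # defer the failure until Apex is actually needed


def backward_with_apex(loss, optimizer) -> None:
    if apex_amp is None:
        raise RuntimeError("apex is not installed, but AMP_TYPE.APEX was requested")
    # scale_loss is the documented apex.amp API for loss scaling.
    with apex_amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
```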

file: colossalai/legacy/amp/naive_amp/naive_amp.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Any import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from torch.distributed import ReduceOp from torch.optim import Optimizer from c...

file: colossalai/legacy/amp/naive_amp/__init__.py
import inspect import torch.nn as nn from torch.optim import Optimizer from colossalai.amp.naive_amp.grad_scaler import ConstantGradScaler, DynamicGradScaler from colossalai.legacy.utils import is_no_pp_or_last_stage from ._fp16_optimizer import FP16Optimizer from .naive_amp import NaiveAMPModel, NaiveAMPOptimizer ...

file: colossalai/legacy/amp/naive_amp/_utils.py
from typing import List from torch import Tensor def has_inf_or_nan(tensor): """Check if tensor has inf or nan values. Args: tensor (:class:`torch.Tensor`): a torch tensor object Returns: bool: Whether the tensor has inf or nan. True for yes and False for no. """ try: # ...
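The preview ends just inside the `try:` of `has_inf_or_nan`. Its docstring matches the well-known Megatron/Apex overflow check, so the body presumably looks like the following; the exact comments and error-message match are assumptions:

```python
def has_inf_or_nan(tensor):
    try:
        # Summing in fp32 funnels any inf/nan into a single scalar; on some
        # torch versions the float() conversion itself raises instead.
        tensor_sum = float(tensor.float().sum())
    except RuntimeError as instance:
        # Only swallow the conversion error; re-raise everything else.
        if "value cannot be converted" not in instance.args[0]:
            raise
        return True
    else:
        # nan != nan, which catches the case the two inf tests miss.
        if tensor_sum == float("inf") or tensor_sum == -float("inf") or tensor_sum != tensor_sum:
            return True
        return False
```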

file: colossalai/legacy/amp/naive_amp/_fp16_optimizer.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch import torch.distributed as dist from torch.distributed import ProcessGroup from torch.optim import Optimizer from colossalai.amp.naive_amp.grad_scaler import BaseGradScaler from colossalai.kernel.kernel_loader import FusedOptimizerLoader from colossalai.le...

file: colossalai/legacy/utils/common.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from collections import defaultdict from contextlib import contextmanager from typing import Dict, List, Optional, Union import torch import torch.distributed as dist from torch import inf from torch.nn.parameter import Parameter from colossalai.legacy.constants import ...

file: colossalai/legacy/utils/checkpointing.py
from collections import OrderedDict from itertools import chain import torch import torch.distributed as dist from colossalai.legacy.constants import IS_TENSOR_PARALLEL from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc try: from torch.nn.mod...

file: colossalai/legacy/utils/memory.py
import gc from collections import namedtuple import psutil import torch import torch.distributed as dist from packaging import version from colossalai.accelerator import get_accelerator from colossalai.legacy.core import global_context as gpc from colossalai.logging import get_dist_logger _GLOBAL_CUDA_MEM_FRACTION =...

file: colossalai/legacy/utils/__init__.py
from .checkpointing import load_checkpoint, save_checkpoint from .common import ( clip_grad_norm_fp32, copy_tensor_parallel_attributes, count_zeros_fp32, is_dp_rank_0, is_model_parallel_parameter, is_no_pp_or_last_stage, is_tp_rank_0, is_using_ddp, is_using_pp, is_using_sequence,...

file: colossalai/legacy/utils/activation_checkpoint.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import weakref import torch from torch.utils.checkpoint import check_backward_validity, detach_variable from colossalai.accelerator import get_accelerator from colossalai.legacy.context.random import get_current_mode, get_states, set_mode, set_seed_states, sync_states ...

file: colossalai/legacy/utils/profiler/profiler.py
import gzip import json import os import tempfile from typing import Any, Callable, Iterable, List, Optional from torch.autograd import ProfilerActivity from torch.profiler import profile as torch_profile from torch.profiler.profiler import ProfilerAction from colossalai.legacy.engine import Engine from colossalai.le...

file: colossalai/legacy/utils/profiler/extention.py
from abc import ABC, abstractmethod class ProfilerExtension(ABC): @abstractmethod def prepare_trace(self): pass @abstractmethod def start_trace(self): pass @abstractmethod def stop_trace(self): pass @abstractmethod def extend_chrome_trace(self, trace: dict) -...

file: colossalai/legacy/utils/profiler/stateful_tensor_mem_extention.py
import os import threading import time from enum import Enum from typing import List import torch from colossalai.gemini.ophooks import BaseOpHook from colossalai.gemini.stateful_tensor import StatefulTensor from colossalai.legacy.engine import Engine from colossalai.legacy.utils.profiler.extention import ProfilerExt...

file: colossalai/legacy/utils/profiler/__init__.py
from .legacy import * from .profiler import profile

file: colossalai/legacy/utils/profiler/legacy/comm_profiler.py
import inspect from functools import partial from pathlib import Path from typing import List, Optional import torch import torch.distributed as dist from torch.autograd.profiler import profile from torch.distributed import ReduceOp from colossalai.accelerator import get_accelerator from .prof_utils import BaseProfi...

file: colossalai/legacy/utils/profiler/legacy/prof_utils.py
from abc import ABC, abstractmethod from pathlib import Path from typing import List, Union from colossalai.legacy.core import global_context as gpc # copied from high version pytorch to support low version def _format_time(time_us): """Defines how to format time in FunctionEvent""" US_IN_SECOND = 1000.0 * 1...
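The file comment says `_format_time` was copied from a newer PyTorch to support older versions, and the preview stops inside the first constant. PyTorch's own version of this helper (torch.autograd profiler utilities) reads approximately as below, which is presumably what the copy contains:

```python
def _format_time(time_us):
    """Defines how to format time in FunctionEvent"""
    US_IN_SECOND = 1000.0 * 1000.0
    US_IN_MS = 1000.0
    if time_us >= US_IN_SECOND:
        return f"{time_us / US_IN_SECOND:.3f}s"
    if time_us >= US_IN_MS:
        return f"{time_us / US_IN_MS:.3f}ms"
    return f"{time_us:.3f}us"
```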

file: colossalai/legacy/utils/profiler/legacy/pcie_profiler.py
from pathlib import Path from typing import List from torch.autograd.profiler import profile from .prof_utils import BaseProfiler, _format_bandwidth, _format_memory, _format_time def _get_size(dtype: str): if dtype == "fp16": return 2 elif dtype == "fp32": return 4 else: raise No...
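Enough of `_get_size` survives the preview to complete it mechanically; only the exception class after "raise No..." is inferred:

```python
def _get_size(dtype: str):
    # Bytes per element for the given dtype string.
    if dtype == "fp16":
        return 2
    elif dtype == "fp32":
        return 4
    else:
        raise NotImplementedError
```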

file: colossalai/legacy/utils/profiler/legacy/__init__.py
from .comm_profiler import CommProfiler from .mem_profiler import MemProfiler from .pcie_profiler import PcieProfiler from .prof_utils import BaseProfiler, ProfilerContext __all__ = ["BaseProfiler", "CommProfiler", "PcieProfiler", "MemProfiler", "ProfilerContext"]

file: colossalai/legacy/utils/data_sampler/base_sampler.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod class BaseSampler(ABC): def __init__(self, dataset, batch_size): self.dataset = dataset self.batch_size = batch_size @abstractmethod def __len__(self): pass @abstractmethod def __iter__(s...

file: colossalai/legacy/utils/data_sampler/__init__.py
from .base_sampler import BaseSampler from .data_parallel_sampler import DataParallelSampler, get_dataloader __all__ = ["BaseSampler", "DataParallelSampler", "get_dataloader"]

file: colossalai/legacy/utils/data_sampler/data_parallel_sampler.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # adapted from torch.utils.data.DistributedSampler import math import random from typing import Iterator, TypeVar import numpy as np import torch from torch.utils.data import DataLoader, Dataset, Sampler from colossalai.legacy.context.parallel_mode import ParallelMode ...

file: colossalai/legacy/utils/checkpoint/module_checkpoint.py
from typing import Dict, Optional import torch import torch.distributed as dist from colossalai.interface import OptimizerWrapper from colossalai.tensor import ColoTensor from .utils import gather_tensor, scatter_tensor def save_checkpoint( path: str, epoch: int, model: torch.nn.Module, optimizer: ...

file: colossalai/legacy/utils/checkpoint/utils.py
import torch import torch.distributed as dist from colossalai.legacy.tensor import ColoTensorSpec from colossalai.legacy.tensor.distspec import DistPlacementPattern, _DistSpec from colossalai.tensor import ColoTensor def robust_broadcast(tensor): with torch.no_grad(): is_cpu_ten = tensor.device.type == "...

file: colossalai/legacy/utils/checkpoint/__init__.py
from .module_checkpoint import load_checkpoint, save_checkpoint __all__ = ["save_checkpoint", "load_checkpoint"]

file: colossalai/legacy/tensor/op_wrapper.py
import functools from typing import Callable, Dict # Custom sharded ops _COLOSSAL_OPS: Dict[str, Callable] = {} def _register_colo_op(op, func): global _COLOSSAL_OPS _COLOSSAL_OPS[op] = func def colo_op_impl(func): """ Provides a way for users to write their own custom operator. This can be use...
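The preview shows the registry (`_COLOSSAL_OPS`) and the registration helper in full, and cuts off inside `colo_op_impl`'s docstring. Decorators of this kind conventionally close over the torch operator being overridden; a sketch under that assumption, with the commented usage purely hypothetical:

```python
from typing import Callable, Dict

# Custom sharded ops, as shown in the preview.
_COLOSSAL_OPS: Dict[str, Callable] = {}


def _register_colo_op(op, func):
    global _COLOSSAL_OPS
    _COLOSSAL_OPS[op] = func


def colo_op_impl(func):
    # Assumed shape of the elided decorator: remember `wrapped_func` as the
    # sharded override for torch operator `func`, then return it unchanged.
    def decorator(wrapped_func):
        _register_colo_op(func, wrapped_func)
        return wrapped_func
    return decorator


# Hypothetical usage: route F.linear on ColoTensors to a custom kernel.
# @colo_op_impl(torch.nn.functional.linear)
# def colo_linear(types, args, kwargs, process_group): ...
```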

file: colossalai/legacy/tensor/tensor_spec.py
from dataclasses import dataclass, field from typing import Optional from colossalai.legacy.tensor.distspec import DistPlacementPattern, _DistSpec from colossalai.legacy.tensor.process_group import ProcessGroup from .compute_spec import ComputeSpec @dataclass class ColoTensorSpec: """ColoTensorSpec A data ...

file: colossalai/legacy/tensor/dist_spec_mgr.py
from contextlib import contextmanager import torch import torch.distributed as dist from numpy import prod from colossalai.legacy.tensor.distspec import DistPlacementPattern, _DistSpec from colossalai.legacy.tensor.process_group import ProcessGroup # TODO(jiaruifang) circle import, move the divide to colossalai.com...

file: colossalai/legacy/tensor/const.py
from enum import Enum class TensorType(Enum): MODEL = 0 NONMODEL = 1 # mainly activations

file: colossalai/legacy/tensor/compute_spec.py
from enum import Enum class ComputePattern(Enum): TP1D = 0 TP2D = 1 TP2P5D = 2 TP3D = 3 class ComputeSpec(object): """ComputeSpec The Specification for computation pattern Args: compute_pattern (ComputePattern): an Enum instance for compute pattern. """ def __init__(sel...

file: colossalai/legacy/tensor/__init__.py
from . import distspec from .compute_spec import ComputePattern, ComputeSpec from .dist_spec_mgr import DistSpecManager from .distspec import ReplicaSpec, ShardSpec from .process_group import ProcessGroup from .tensor_spec import ColoTensorSpec __all__ = [ "ComputePattern", "ComputeSpec", "distspec", "...

file: colossalai/legacy/tensor/process_group.py
from typing import List, Optional import torch from colossalai.context.singleton_meta import SingletonMeta from colossalai.logging import get_dist_logger class PyTorchProcessGroupDict(metaclass=SingletonMeta): def __init__(self): # distributed settings # use this dict to record all Pytorch Proce...

file: colossalai/legacy/tensor/distspec.py
from enum import Enum from typing import List __all__ = ["ReplicaSpec", "ShardSpec"] class DistPlacementPattern(Enum): REPLICATE = "r" SHARD = "s" class _DistSpec: """_DistSpec A class indicates Distributed Specification. The DistSpec is only works for the tensor parallel process groups. B...

file: colossalai/legacy/nn/__init__.py
from .layer import * from .loss import * from .metric import *

file: colossalai/legacy/nn/loss/loss_2p5d.py
import torch import torch.distributed as dist from torch.cuda.amp import custom_bwd, custom_fwd from torch.nn.functional import cross_entropy from torch.nn.modules.loss import _Loss from colossalai.accelerator import get_accelerator from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import ...

file: colossalai/legacy/nn/loss/loss_3d.py
import torch import torch.distributed as dist from torch.cuda.amp import custom_bwd, custom_fwd from torch.nn.functional import cross_entropy from torch.nn.modules.loss import _Loss from colossalai.accelerator import get_accelerator from colossalai.legacy.constants import INPUT_GROUP_3D, OUTPUT_GROUP_3D, WEIGHT_GROUP_...

file: colossalai/legacy/nn/loss/loss_2d.py
import torch import torch.distributed as dist from torch.cuda.amp import custom_bwd, custom_fwd from torch.nn.functional import cross_entropy from torch.nn.modules.loss import _Loss from colossalai.accelerator import get_accelerator from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import ...

file: colossalai/legacy/nn/loss/loss_1d.py
import torch import torch.distributed as dist from torch.cuda.amp import custom_bwd, custom_fwd from torch.nn.modules.loss import _Loss from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.registry import LOSSES class _VocabParallelCrossEn...

file: colossalai/legacy/nn/loss/__init__.py
from torch import nn from torch.nn.modules.loss import * from torch.nn.modules.loss import _Loss from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.nn.layer.utils import get_tensor_parallel_mode from .loss_1d import VocabParallelCrossEntropyLoss1D from .loss_2d import Cro...

file: colossalai/legacy/nn/_ops/__init__.py
from ._utils import *

file: colossalai/legacy/nn/_ops/_utils.py
from typing import List, Optional, Union import torch import torch.distributed as dist from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.nn.layer.utils import divide from colossalai.legacy.tensor import ColoTensorSpec, ProcessGroup from colossalai.tensor import ColoTenso...

file: colossalai/legacy/nn/layer/__init__.py
from .colossalai_layer import * from .parallel_1d import * from .parallel_2d import * from .parallel_2p5d import * from .parallel_3d import * from .parallel_sequence import * from .utils import * from .vanilla import * from .wrapper import *

file: colossalai/legacy/nn/layer/base_layer.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from contextlib import contextmanager import torch.nn as nn from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc class ParallelLayer(nn.Module): global_state_dict: bool = True def __init__(self): ...

file: colossalai/legacy/nn/layer/parallel_sequence/_operation.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch from torch import distributed as dist from torch.cuda.amp import custom_bwd, custom_fwd from colossalai.accelerator import get_accelerator from colossalai.legacy.communication import ring_forward from colossalai.legacy.context.parallel_mode import ParallelM...

file: colossalai/legacy/nn/layer/parallel_sequence/__init__.py
from ._operation import RingAV, RingQK from .layers import TransformerSelfAttentionRing __all__ = ["TransformerSelfAttentionRing", "RingAV", "RingQK"]

file: colossalai/legacy/nn/layer/parallel_sequence/_utils.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- def _calc_incoming_device_range(i, rank, world_size, sub_seq_length): device_of_incoming_k = (rank - i - 1) % world_size start_idx = sub_seq_length * device_of_incoming_k end_idx = sub_seq_length * (device_of_incoming_k + 1) return start_idx, end_idx d...
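`_calc_incoming_device_range` is fully visible above, so its ring-communication arithmetic can be checked concretely: at step i, a rank receives the sub-sequence chunk owned by rank (rank - i - 1) % world_size.

```python
# For rank=0, world_size=4, sub_seq_length=128:
#   i=0 -> chunk owned by rank 3 -> indices [384, 512)
#   i=1 -> chunk owned by rank 2 -> indices [256, 384)
#   i=2 -> chunk owned by rank 1 -> indices [128, 256)
start_idx, end_idx = _calc_incoming_device_range(i=0, rank=0, world_size=4, sub_seq_length=128)
assert (start_idx, end_idx) == (384, 512)
```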

file: colossalai/legacy/nn/layer/parallel_sequence/layers.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from colossalai.legacy.context import seed from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context a...

file: colossalai/legacy/nn/layer/utils/common.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import collections.abc from itertools import repeat import numpy as np import torch from torch import Tensor, nn from colossalai.legacy.constants import IS_TENSOR_PARALLEL, NUM_PARTITIONS from colossalai.legacy.global_variables import tensor_parallel_env as env from co...

file: colossalai/legacy/nn/layer/utils/__init__.py
from .common import ( ACT2FN, CheckpointModule, _ntuple, divide, get_tensor_parallel_mode, set_tensor_parallel_attribute_by_partition, set_tensor_parallel_attribute_by_size, to_2tuple, ) __all__ = [ "CheckpointModule", "divide", "ACT2FN", "set_tensor_parallel_attribute_b...

file: colossalai/legacy/nn/layer/vanilla/__init__.py
from .layers import ( DropPath, VanillaClassifier, VanillaLayerNorm, VanillaLinear, VanillaPatchEmbedding, WrappedDropout, WrappedDropPath, ) __all__ = [ "VanillaLayerNorm", "VanillaPatchEmbedding", "VanillaClassifier", "DropPath", "WrappedDropout", "WrappedDropPath"...

file: colossalai/legacy/nn/layer/vanilla/layers.py
import math from typing import Callable import torch import torch.nn.functional as F from torch import Tensor from torch import nn as nn from torch.nn.parameter import Parameter from colossalai.accelerator import get_accelerator from colossalai.legacy.context import seed from colossalai.legacy.registry import LAYERS ...

file: colossalai/legacy/nn/layer/wrapper/pipeline_wrapper.py
from typing import List, Tuple, Union import torch.distributed as dist import torch.nn as nn from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc class PipelineSharedModuleWrapper: def __init__(self, pipeline_ranks: Union[List[int], Tuple[int]]) -> None: ...

file: colossalai/legacy/nn/layer/wrapper/__init__.py
from .pipeline_wrapper import PipelineSharedModuleWrapper __all__ = ["PipelineSharedModuleWrapper"]

file: colossalai/legacy/nn/layer/parallel_2d/_operation.py
from typing import Any, Optional, Tuple import torch import torch.distributed as dist from torch import Tensor from torch.cuda.amp import custom_bwd, custom_fwd from colossalai.accelerator import get_accelerator from colossalai.legacy.communication.collective import all_gather, all_reduce, reduce_scatter from colossa...
truncated: true

file: colossalai/legacy/nn/layer/parallel_2d/__init__.py
from ._operation import reduce_by_batch_2d, split_batch_2d from .layers import ( Classifier2D, Embedding2D, LayerNorm2D, Linear2D, PatchEmbedding2D, VocabParallelClassifier2D, VocabParallelEmbedding2D, ) __all__ = [ "split_batch_2d", "reduce_by_batch_2d", "Linear2D", "LayerN...

file: colossalai/legacy/nn/layer/parallel_2d/_utils.py
from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.global_variables import tensor_parallel_env as env def get_summa_dim_from_env() -> int: try: summa_dim = env.summa_dim assert summa_dim > 0, "SUMMA_DIM m...
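Here `summa_dim` is the side length of the square process grid used by the 2D (SUMMA-style) tensor-parallel layers. The preview breaks inside the assert message; a plausible completion, with the except branch assumed to turn a missing attribute into a configuration error:

```python
from colossalai.legacy.global_variables import tensor_parallel_env as env


def get_summa_dim_from_env() -> int:
    try:
        summa_dim = env.summa_dim
        assert summa_dim > 0, "SUMMA_DIM must be larger than zero"
        return summa_dim
    except AttributeError:
        # Assumed handling: env.summa_dim is only populated once the 2D
        # tensor-parallel context has been initialized.
        raise EnvironmentError("SUMMA_DIM is not set in the current environment")
```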

file: colossalai/legacy/nn/layer/parallel_2d/layers.py
import math from collections import OrderedDict from typing import Callable import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torch.nn import Parameter from colossalai.accelerator import get_accelerator from colossalai.legacy.communication import broadcast from colossala...
truncated: true

file: colossalai/legacy/nn/layer/colossalai_layer/dropout.py
import torch.nn as nn from colossalai.legacy.context import ParallelMode, seed from ..parallel_1d import * from ..utils import get_tensor_parallel_mode from ._utils import ColossalaiModule class Dropout(ColossalaiModule): """Dropout layer of colossalai. Args: p (float, optional): probability of an ...