Dataset schema (one row per file):

  repo          string, length 7-90
  file_url      string, length 81-315
  file_path     string, length 4-228
  content       string, length 0-32.8k
  language      string, 1 class
  license       string, 7 classes
  commit_sha    string, length 40
  retrieved_at  date, 2026-01-04 14:38:15 to 2026-01-05 02:33:18
  truncated     bool, 2 classes
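A minimal sketch of how one row of this schema could be represented and sanity-checked in Python. The `Row` class name, field comments, and the checks are illustrative, derived only from the column statistics above; they are not part of the dataset itself.

    from dataclasses import dataclass

    @dataclass
    class Row:
        """One record of the dataset, mirroring the columns above."""
        repo: str            # 7-90 chars
        file_url: str        # 81-315 chars
        file_path: str       # 4-228 chars
        content: str         # 0-32.8k chars
        language: str        # a single class: "python"
        license: str         # one of 7 classes
        commit_sha: str      # exactly 40 chars (a git SHA)
        retrieved_at: str    # ISO-8601 timestamp
        truncated: bool

    def check(row: Row) -> None:
        # Sanity checks derived from the stated column statistics.
        assert len(row.commit_sha) == 40
        assert row.file_url.startswith("https://")
        assert isinstance(row.truncated, bool)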
Rows (100 total). The following columns are identical in every row and are stated once here:

  repo          hpcaitech/ColossalAI
  language      python
  license       Apache-2.0
  commit_sha    b1915d2889543949eb5b610241f1515c73df5059
  retrieved_at  2026-01-04T14:40:19.002665Z
  file_url      https://github.com/hpcaitech/ColossalAI/blob/<commit_sha>/<file_path>

Each row below gives file_path, the truncated flag, and the content preview.

colossalai/booster/plugin/dp_plugin_base.py  [truncated: false]
  import random import numpy as np import torch import torch.distributed as dist from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from .plugin_base import Plugin class DPPluginBase(Plugin): """This is a base class for all DP plugins. It sets up world size and ran...
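The preview above shows DPPluginBase wiring a DistributedSampler into a DataLoader. A minimal sketch of that pattern with plain PyTorch, assuming the default process group is already initialized; this is an illustration, not ColossalAI's actual implementation.

    import torch.distributed as dist
    from torch.utils.data import DataLoader
    from torch.utils.data.distributed import DistributedSampler

    def prepare_dataloader(dataset, batch_size: int) -> DataLoader:
        # Each rank draws a disjoint shard of the dataset; shuffling is
        # coordinated through the sampler rather than DataLoader(shuffle=True).
        sampler = DistributedSampler(
            dataset,
            num_replicas=dist.get_world_size(),
            rank=dist.get_rank(),
            shuffle=True,
        )
        return DataLoader(dataset, batch_size=batch_size, sampler=sampler)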
colossalai/booster/plugin/plugin_base.py  [truncated: false]
  from abc import ABC, abstractmethod from typing import Callable, Dict, Iterator, List, Optional, Tuple import torch.nn as nn from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils.data import DataLoader, Dataset from colossalai.checkpoint_io import Checkpoi...

colossalai/booster/plugin/pp_plugin_base.py  [truncated: false]
  from abc import abstractmethod from typing import Any, Callable, Iterator, Optional import torch from colossalai.interface import ModelWrapper, OptimizerWrapper from .plugin_base import Plugin class PipelinePluginBase(Plugin): @abstractmethod def execute_pipeline( self, data_iter: Iterator,...

colossalai/booster/plugin/__init__.py  [truncated: false]
  from .gemini_plugin import GeminiPlugin from .hybrid_parallel_plugin import HybridParallelPlugin from .low_level_zero_plugin import LowLevelZeroPlugin from .moe_hybrid_parallel_plugin import MoeHybridParallelPlugin from .plugin_base import Plugin from .torch_ddp_plugin import TorchDDPPlugin __all__ = [ "Plugin", ...

colossalai/booster/plugin/gemini_plugin.py  [truncated: true]
  import os import random from pathlib import Path from typing import Callable, Dict, Iterator, List, Optional, Tuple import numpy as np import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.distributed_c10d import _get_default_group from torch.optim import Optimizer from torch.optim...

colossalai/booster/plugin/moe_hybrid_parallel_plugin.py  [truncated: false]
  from collections import defaultdict from types import MethodType from typing import Callable, List, Optional, OrderedDict, Tuple import torch import torch.distributed as dist from torch.distributed import ProcessGroup from torch.nn import Module from torch.optim import Optimizer from torch.optim.lr_scheduler import _L...

colossalai/booster/plugin/hybrid_parallel_plugin.py  [truncated: true]
  import ctypes import random from collections import defaultdict from contextlib import contextmanager, nullcontext from copy import deepcopy from functools import partial from types import MethodType from typing import Any, Callable, Dict, Iterator, List, Optional, OrderedDict, Tuple, Union import numpy as np import t...
colossalai/booster/mixed_precision/fp16_naive.py  [truncated: false]
  from .mixed_precision_base import MixedPrecision class FP16NaiveMixedPrecision(MixedPrecision): """ Precision for mixed precision training in FP16 using naive AMP. Args: log_num_zeros_in_grad(bool): return number of zeros in the gradients. initial_scale(int): initial scale of gradient scaler. ...

colossalai/booster/mixed_precision/bf16.py  [truncated: false]
  from .mixed_precision_base import MixedPrecision class BF16MixedPrecision(MixedPrecision): pass

colossalai/booster/mixed_precision/fp16_apex.py  [truncated: false]
  from typing import Any, Optional, Union import torch from .mixed_precision_base import MixedPrecision class FP16ApexMixedPrecision(MixedPrecision): """ Precision for mixed precision training in FP16 using apex AMP. Args: opt_level(str, optional, default="O1" ): Pure or mixed precision optimizat...

colossalai/booster/mixed_precision/fp16_torch.py  [truncated: false]
  from typing import Callable, Optional, Tuple, Union import torch import torch.nn as nn from torch import Tensor from torch.optim import Optimizer from colossalai.accelerator import get_accelerator from colossalai.interface import ModelWrapper, OptimizerWrapper from .mixed_precision_base import MixedPrecision __all_...

colossalai/booster/mixed_precision/fp8.py  [truncated: false]
  from .mixed_precision_base import MixedPrecision class FP8MixedPrecision(MixedPrecision): pass

colossalai/booster/mixed_precision/__init__.py  [truncated: false]
  from .bf16 import BF16MixedPrecision from .fp8 import FP8MixedPrecision from .fp16_apex import FP16ApexMixedPrecision from .fp16_naive import FP16NaiveMixedPrecision from .fp16_torch import FP16TorchMixedPrecision from .mixed_precision_base import MixedPrecision __all__ = [ "MixedPrecision", "mixed_precision_f...

colossalai/booster/mixed_precision/mixed_precision_base.py  [truncated: false]
  from abc import ABC, abstractmethod from typing import Callable, Optional, Tuple import torch.nn as nn from torch.optim import Optimizer from colossalai.interface import OptimizerWrapper class MixedPrecision(ABC): """ An abstract class for mixed precision training. """ @abstractmethod def confi...
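For orientation, here is a minimal FP16 training step with stock torch.cuda.amp, the mechanism the FP16TorchMixedPrecision row above wraps. It assumes a CUDA device; the model, optimizer, and data are placeholders, not anything from these files.

    import torch

    model = torch.nn.Linear(8, 8).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scaler = torch.cuda.amp.GradScaler()

    x = torch.randn(4, 8, device="cuda")
    with torch.autocast(device_type="cuda", dtype=torch.float16):
        loss = model(x).sum()
    scaler.scale(loss).backward()  # scale the loss to avoid fp16 grad underflow
    scaler.step(optimizer)         # unscales grads, skips the step on inf/nan
    scaler.update()                # adjusts the scale for the next iteration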
colossalai/amp/__init__.py  [truncated: false]
  (content empty)

colossalai/amp/naive_amp/mixed_precision_optimizer.py  [truncated: false]
  from typing import Dict, List, Optional, Tuple import torch from torch import Tensor, inf from torch.nn import Module, Parameter from torch.optim import Optimizer from colossalai.interface import OptimizerWrapper from .mixed_precision_mixin import BF16MixedPrecisionMixin, FP16MixedPrecisionMixin class NaiveFP16Mix...

colossalai/amp/naive_amp/__init__.py  [truncated: false]
  (content empty)

colossalai/amp/naive_amp/grad_scaler/constant_grad_scaler.py  [truncated: false]
  #!/usr/bin/env python # -*- encoding: utf-8 -*- from .base_grad_scaler import BaseGradScaler __all__ = ["ConstantGradScaler"] class ConstantGradScaler(BaseGradScaler): """A gradient scaler which uses constant loss scale Args: initial_scale (float): the initial loss scale verbose (bool): whet...

colossalai/amp/naive_amp/grad_scaler/base_grad_scaler.py  [truncated: false]
  #!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod from typing import Dict import torch from torch import Tensor from colossalai.accelerator import get_accelerator from colossalai.logging import get_dist_logger __all__ = ["BaseGradScaler"] class BaseGradScaler(ABC): """A base ...

colossalai/amp/naive_amp/grad_scaler/dynamic_grad_scaler.py  [truncated: false]
  #!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Optional import torch from colossalai.accelerator import get_accelerator from .base_grad_scaler import BaseGradScaler __all__ = ["DynamicGradScaler"] class DynamicGradScaler(BaseGradScaler): """A gradient scaler which uses dynamic loss scale ...

colossalai/amp/naive_amp/grad_scaler/__init__.py  [truncated: false]
  from .base_grad_scaler import BaseGradScaler from .constant_grad_scaler import ConstantGradScaler from .dynamic_grad_scaler import DynamicGradScaler __all__ = ["BaseGradScaler", "ConstantGradScaler", "DynamicGradScaler"]
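A minimal sketch of the dynamic loss-scaling policy the DynamicGradScaler row above describes: back off the scale on overflow, grow it after a run of clean steps. The class name and the factor/interval defaults below are illustrative assumptions, not the library's values.

    class DynamicScale:
        def __init__(self, initial_scale=2.0**16, growth_factor=2.0,
                     backoff_factor=0.5, growth_interval=1000):
            self.scale = initial_scale
            self.growth_factor = growth_factor
            self.backoff_factor = backoff_factor
            self.growth_interval = growth_interval
            self._good_steps = 0

        def update(self, found_overflow: bool) -> None:
            if found_overflow:
                # Overflow: back off immediately and restart the streak.
                self.scale *= self.backoff_factor
                self._good_steps = 0
            else:
                self._good_steps += 1
                if self._good_steps % self.growth_interval == 0:
                    self.scale *= self.growth_factor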
colossalai/amp/naive_amp/mixed_precision_mixin/fp16.py  [truncated: false]
  from abc import abstractmethod from enum import Enum import torch import torch.distributed as dist from torch import Tensor from colossalai.accelerator import get_accelerator from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler from .base import MixedPrecisionMixin class OptimState(Enum): SCALED ...

colossalai/amp/naive_amp/mixed_precision_mixin/bf16.py  [truncated: false]
  import torch from torch import Tensor from .base import MixedPrecisionMixin class BF16MixedPrecisionMixin(MixedPrecisionMixin): dtype = torch.bfloat16 def pre_backward(self, loss: Tensor) -> Tensor: return loss def pre_backward_by_grad(self, tensor: Tensor, grad: Tensor) -> Tensor: retu...

colossalai/amp/naive_amp/mixed_precision_mixin/__init__.py  [truncated: false]
  from .base import MixedPrecisionMixin from .bf16 import BF16MixedPrecisionMixin from .fp16 import FP16MixedPrecisionMixin __all__ = [ "MixedPrecisionMixin", "FP16MixedPrecisionMixin", "BF16MixedPrecisionMixin", ]

colossalai/amp/naive_amp/mixed_precision_mixin/base.py  [truncated: false]
  from abc import ABC, abstractmethod import torch from torch import Tensor class MixedPrecisionMixin(ABC): """A helper class for mixed precision training. This mixin is used in mixed precision optimizers. Attributes: dtype (torc.dtype): The expected dtype of the gradients. Examples: ```p...
colossalai/utils/common.py  [truncated: false]
  #!/usr/bin/env python # -*- encoding: utf-8 -*- import functools import os import random from contextlib import contextmanager from pathlib import Path from typing import Callable, Optional, Set import numpy as np import torch import torch.nn as nn from colossalai.accelerator import get_accelerator def get_current_...

colossalai/utils/timer.py  [truncated: false]
  #!/usr/bin/env python # -*- encoding: utf-8 -*- import time from typing import Tuple from colossalai.accelerator import get_accelerator class Timer: """A timer object which helps to log the execution times, and provides different tools to assess the times.""" def __init__(self): self._started = Fals...
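A minimal context-manager timer in the same spirit as the Timer row above, assuming CPU-side wall-clock timing only (the real class synchronizes through the accelerator before measuring):

    import time
    from contextlib import contextmanager

    @contextmanager
    def timer(name: str):
        start = time.time()
        try:
            yield
        finally:
            # Report elapsed wall-clock time even if the body raises.
            print(f"{name}: {time.time() - start:.4f}s")

    with timer("warmup"):
        sum(i * i for i in range(10**6))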
colossalai/utils/memory.py  [truncated: false]
  from collections import namedtuple import psutil import torch import torch.distributed as dist from colossalai.utils import get_current_device _GLOBAL_CUDA_MEM_FRACTION = 1.0 _GLOBAL_CPU_MEM_CAPACITY = -1 # copy from PatrickStar def _get_cpu_memory_info(): ps_mem_info = namedtuple("ps_mem_info", ["total", "fre...

colossalai/utils/__init__.py  [truncated: false]
  from .common import ( _cast_float, conditional_context, disposable, ensure_path_exists, free_storage, get_current_device, get_non_persistent_buffers_set, is_ddp_ignored, set_seed, ) from .multi_tensor_apply import multi_tensor_applier from .tensor_detector import TensorDetector from ...

colossalai/utils/safetensors.py  [truncated: false]
  # a python safetensors serializer modified from https://github.com/huggingface/safetensors/blob/41bd1acf38ad28ac559522d40596c6c802f79453/safetensors/src/tensor.rs#L214 import json import warnings from dataclasses import asdict, dataclass from typing import Dict, List, Optional, Tuple import torch from safetensors.torc...

colossalai/utils/rank_recorder/rank_recorder.py  [truncated: false]
  import atexit import json import os import shutil import time from typing import Dict, List import matplotlib.colors as mcolors import matplotlib.pyplot as plt import torch import torch.distributed as dist cmap = list(mcolors.TABLEAU_COLORS.values()) LOG_FOLDER = "record.log" MAX_WAIT_TIME = 20 class Event: de...

colossalai/utils/rank_recorder/__init__.py  [truncated: false]
  from colossalai.utils.rank_recorder.rank_recorder import recorder __all__ = ["recorder"]

colossalai/utils/model/utils.py  [truncated: false]
  # This code has been adapted from the DeepSpeed library. # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import functools from typing import Optional import torch def substitute_init_recursively(cls, func, visited: set): for subcls in cls.__subclasses__(): substitute_init_recurs...

colossalai/utils/model/__init__.py  [truncated: false]
  (content empty)

colossalai/utils/multi_tensor_apply/__init__.py  [truncated: false]
  from .multi_tensor_apply import MultiTensorApply multi_tensor_applier = MultiTensorApply(2048 * 32)
colossalai/utils/multi_tensor_apply/multi_tensor_apply.py  [truncated: false]
  # modified from https://github.com/NVIDIA/apex/blob/master/apex/multi_tensor_apply/multi_tensor_apply.py class MultiTensorApply(object): """ Apply an operation to a list of tensors efficiently. Args: chunk_size (int): Size of a chunk. """ available = False warned = False def __i...
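The MultiTensorApply row above describes applying one operation across many tensors in fixed-size chunks. A hedged, pure-PyTorch sketch of that chunking idea follows; the real class dispatches to a fused apex/CUDA kernel, which this deliberately does not reproduce.

    from typing import Callable, List
    import torch

    def multi_tensor_apply(op: Callable[[torch.Tensor], None],
                           tensors: List[torch.Tensor],
                           chunk_size: int = 2048 * 32) -> None:
        # Flatten each tensor and walk it in chunk_size pieces so the op
        # always sees bounded-size inputs regardless of tensor shapes.
        for t in tensors:
            flat = t.reshape(-1)
            for start in range(0, flat.numel(), chunk_size):
                op(flat[start:start + chunk_size])

    # In-place halve every element across a heterogeneous tensor list.
    multi_tensor_apply(lambda c: c.mul_(0.5), [torch.ones(5), torch.ones(3, 3)])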
colossalai/utils/tensor_detector/tensor_detector.py  [truncated: false]
  import gc import inspect from collections import defaultdict from typing import Optional import torch import torch.nn as nn LINE_WIDTH = 108 LINE = "-" * LINE_WIDTH + "\n" class TensorDetector: def __init__( self, show_info: bool = True, log: str = None, include_cpu: bool = False, module: Optional[nn.Mo...
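TensorDetector walks live Python objects with gc to report tensors. A minimal sketch of that core trick, with the report formatting and module tracking omitted; the helper name is illustrative.

    import gc
    import torch

    def live_tensors():
        # gc.get_objects() exposes every tracked Python object; keep tensors.
        for obj in gc.get_objects():
            try:
                if isinstance(obj, torch.Tensor):
                    yield type(obj).__name__, tuple(obj.shape), obj.device
            except ReferenceError:
                continue  # some proxies die between enumeration and inspection

    x = torch.zeros(2, 3)
    for name, shape, device in live_tensors():
        print(name, shape, device)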
colossalai/utils/tensor_detector/__init__.py  [truncated: false]
  from .tensor_detector import TensorDetector

colossalai/tensor/colo_parameter.py  [truncated: false]
  from typing import Optional import torch from colossalai.tensor.colo_tensor import ColoTensor from colossalai.tensor.param_op_hook import ColoParamOpHookManager from .colo_tensor import _convert_output WHITE_LIST_FUNCS = {torch.Tensor.__getitem__} NO_HOOK_FUNCS = {torch.Tensor.is_floating_point} def is_no_hook_op...

colossalai/tensor/shape_consistency.py  [truncated: true]
  import math from copy import deepcopy from dataclasses import dataclass from typing import Dict, List, Tuple import numpy as np import torch from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, TrainCycleItem from colossalai.context.singleton_meta import SingletonMeta from colossalai.tensor...

colossalai/tensor/comm_spec.py  [truncated: false]
  import operator from enum import Enum from functools import reduce import torch import torch.distributed as dist from torch.distributed import ReduceOp __all__ = [ "CollectiveCommPattern", "CommSpec", ] def _all_gather(tensor, comm_spec): """ Implement all gather operation on device mesh based on in...

colossalai/tensor/colo_tensor.py  [truncated: false]
  from functools import lru_cache from typing import Callable, Set import torch INPALCE_MAPPING = { torch.Tensor.add_: torch.Tensor.add, torch.Tensor.sub_: torch.Tensor.sub, torch.Tensor.mul_: torch.Tensor.mul, torch.Tensor.div_: torch.Tensor.div, } @lru_cache(None) def _get_my_nowrap_functions() -> S...

colossalai/tensor/utils.py  [truncated: false]
  from typing import Dict, Iterator, List, Tuple, Union import torch import torch.nn as nn from colossalai.tensor.colo_tensor import ColoTensor def all_gather_simulator(target_pair): """ Simulating all-gather operation, analyze the communication cost and simulate the influence of the DimSpec. We don'...

colossalai/tensor/param_op_hook.py  [truncated: false]
  from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, List, Tuple import torch from torch.utils._pytree import TreeSpec, tree_flatten, tree_unflatten class ColoParamOpHook(ABC): """ Hook which is triggered by each operation when operands contain ColoParameter. ...

colossalai/tensor/__init__.py  [truncated: false]
  from .colo_parameter import ColoParameter from .colo_tensor import ColoTensor from .comm_spec import CollectiveCommPattern, CommSpec from .param_op_hook import ColoParamOpHook, ColoParamOpHookManager from .utils import convert_dim_partition_dict, convert_parameter, merge_same_dim_mesh_list, named_params_with_colotensor...

colossalai/tensor/sharding_spec.py  [truncated: false]
  import operator from functools import reduce import torch from colossalai.device.device_mesh import DeviceMesh from .utils import merge_same_dim_mesh_list __all__ = ["_DimSpec", "ShardingException", "ShardingSpec"] ALLGATHER_COST = 20 SHARD_COST = 5 STEP_PENALTY = 6 NAN = "nan" class _DimSpec: """ Shardi...

colossalai/tensor/moe_tensor/api.py  [truncated: false]
  from typing import List import torch import torch.distributed as dist from torch.distributed import ProcessGroup from .moe_info import MoeParallelInfo def is_moe_tensor(tensor: torch.Tensor) -> bool: """ Check whether the given tensor is a moe tensor. Args: tensor (torch.Tensor): The tensor to ...
colossalai/tensor/moe_tensor/moe_info.py  [truncated: false]
  from colossalai.cluster import ProcessGroupMesh class MoeParallelInfo: """Moe parallelism information, storing parallel sizes and groups.""" def __init__(self, ep_inside: bool, ep_size: int, dp_size: int, pp_size: int = 1): """ init MoeParallelInfo with ep_size, dp_size and pp_size A...

colossalai/tensor/moe_tensor/__init__.py  [truncated: false]
  (content empty)

colossalai/tensor/padded_tensor/api.py  [truncated: false]
  import torch def _hijack_detach_and_clone(ptensor: torch.Tensor) -> torch.Tensor: """ Hijack the detach and clone methods of the tensor to make sure the dist_layout is copied. Args: tensor (torch.Tensor): The tensor to be hijacked. Returns: torch.Tensor: The hijacked tensor. """ ...

colossalai/tensor/padded_tensor/__init__.py  [truncated: false]
  from .api import init_as_padded_tensor, is_padded_tensor, to_padded_tensor, to_unpadded_tensor __all__ = ["is_padded_tensor", "to_padded_tensor", "to_unpadded_tensor", "init_as_padded_tensor"]

colossalai/tensor/d_tensor/api.py  [truncated: false]
  import copy import operator from functools import reduce from typing import Union import torch import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.device.device_mesh import DeviceMesh from colossalai.tensor.d_tensor.sharding_spec import DimSpec from .layout import Layout from ...

colossalai/tensor/d_tensor/comm_spec.py  [truncated: false]
  from enum import Enum from typing import Dict import torch import torch.distributed as dist from torch.distributed import ReduceOp __all__ = [ "CollectiveCommPattern", "CommSpec", ] class CollectiveCommPattern(Enum): GATHER_FWD_SPLIT_BWD = "gather_fwd_split_bwd" ALL2ALL_FWD_ALL2ALL_BWD = "all2all_fw...

colossalai/tensor/d_tensor/layout_converter.py  [truncated: false]
  import math from copy import deepcopy from dataclasses import dataclass from typing import Dict, List, Tuple import torch import torch.distributed as dist from colossalai.context.singleton_meta import SingletonMeta from colossalai.tensor.d_tensor.comm_spec import * from colossalai.tensor.d_tensor.layout import Layout...

colossalai/tensor/d_tensor/utils.py  [truncated: false]
  import operator from functools import reduce from typing import Dict from colossalai.tensor.d_tensor.comm_spec import CollectiveCommPattern, CommSpec from colossalai.tensor.d_tensor.layout import Layout def get_comm_cost(layout: Layout, comm_spec: CommSpec, forward_only: bool = False) -> Dict[str, float]: """ ...

colossalai/tensor/d_tensor/misc.py  [truncated: false]
  class LayoutException(Exception): pass class DuplicatedShardingDimensionError(LayoutException): pass class ShardingNotDivisibleError(LayoutException): pass class ShardingOutOfIndexError(LayoutException): pass

colossalai/tensor/d_tensor/__init__.py  [truncated: false]
  from .api import ( compute_global_numel, customized_distributed_tensor_to_param, distribute_tensor, distribute_tensor_with_customization, get_device_mesh, get_global_shape, get_layout, get_shard_dim_1d, get_sharding_spec, init_as_dtensor, init_tensor_as_customization_distribu...

colossalai/tensor/d_tensor/sharding_spec.py  [truncated: false]
  from typing import Dict, List from ..utils import merge_same_dim_mesh_list from .misc import ShardingOutOfIndexError __all__ = ["DimSpec", "ShardingException", "ShardingSpec"] ALLGATHER_COST = 20 SHARD_COST = 5 STEP_PENALTY = 6 NAN = "nan" class DimSpec: """ Sharding spec for single dimension of the sharde...

colossalai/tensor/d_tensor/layout.py  [truncated: false]
  import operator from functools import reduce import torch from colossalai.device.device_mesh import DeviceMesh from .misc import DuplicatedShardingDimensionError, ShardingNotDivisibleError from .sharding_spec import ShardingSpec class Layout: """Layout of a tensor. Attributes: device_mesh: the dev...
colossalai/checkpoint_io/checkpoint_io_base.py  [truncated: false]
  from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, Optional, Union import torch import torch.nn as nn from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from colossalai.interface import ModelWrapper from colossalai.logging import get_d...

colossalai/checkpoint_io/index_file.py  [truncated: false]
  import json import os from collections import OrderedDict from pathlib import Path from typing import Any, Dict, List, Union from .utils import is_dtensor_checkpoint __all__ = ["CheckpointIndexFile"] class CheckpointIndexFile: """ This class is a data structure to keep the content in the index.json file for...

colossalai/checkpoint_io/general_checkpoint_io.py  [truncated: false]
  import logging import os from functools import reduce from pathlib import Path from typing import Optional import torch.nn as nn from torch.optim import Optimizer from colossalai.utils.safetensors import load_flat from .checkpoint_io_base import CheckpointIO from .index_file import CheckpointIndexFile from .utils im...

colossalai/checkpoint_io/utils.py  [truncated: true]
  # coding=utf-8 import concurrent.futures import os import re import warnings from collections import abc as container_abcs from collections import defaultdict from itertools import chain from pathlib import Path from typing import Dict, Generator, Iterator, List, Mapping, Optional, OrderedDict, Tuple, Union import tor...

colossalai/checkpoint_io/__init__.py  [truncated: false]
  from .checkpoint_io_base import CheckpointIO from .general_checkpoint_io import GeneralCheckpointIO from .hybrid_parallel_checkpoint_io import HybridParallelCheckpointIO from .index_file import CheckpointIndexFile from .moe_checkpoint import MoECheckpointIO __all__ = [ "CheckpointIO", "CheckpointIndexFile", ...

colossalai/checkpoint_io/hybrid_parallel_checkpoint_io.py  [truncated: true]
  import copy import logging import os from collections import defaultdict from functools import reduce from pathlib import Path from shutil import rmtree from typing import Dict, Iterator, Optional, OrderedDict, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import Proc...

colossalai/checkpoint_io/moe_checkpoint.py  [truncated: true]
  import copy import logging import os from pathlib import Path from shutil import rmtree from typing import Dict, Iterator, Optional, OrderedDict, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from torch.distributed.distributed_c10d import get_globa...
colossalai/accelerator/npu_accelerator.py  [truncated: false]
  #!/usr/bin/env python from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.distributed as dist from .base_accelerator import BaseAccelerator try: import torch_npu # noqa except ImportError: pass __all__ = ["NpuAccelerator"] class NpuAccelerator(BaseAccelerator)...

colossalai/accelerator/api.py  [truncated: false]
  #!/usr/bin/env python from collections import OrderedDict from typing import Union from .base_accelerator import BaseAccelerator from .cpu_accelerator import CpuAccelerator from .cuda_accelerator import CudaAccelerator from .npu_accelerator import NpuAccelerator __all__ = ["set_accelerator", "auto_set_accelerator", "...

colossalai/accelerator/cpu_accelerator.py  [truncated: false]
  #!/usr/bin/env python import resource from contextlib import nullcontext from typing import Any, Callable, Dict, List, Optional, Tuple, Union import psutil import torch from .base_accelerator import BaseAccelerator __all__ = ["CpuAccelerator"] class CpuAccelerator(BaseAccelerator): support_set_device: bool = ...

colossalai/accelerator/cuda_accelerator.py  [truncated: false]
  #!/usr/bin/env python from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.distributed as dist from .base_accelerator import BaseAccelerator __all__ = ["CudaAccelerator"] class CudaAccelerator(BaseAccelerator): """ Accelerator class for Nvidia CUDA devices. ""...

colossalai/accelerator/__init__.py  [truncated: false]
  from .api import auto_set_accelerator, get_accelerator, set_accelerator from .base_accelerator import BaseAccelerator from .cpu_accelerator import CpuAccelerator from .cuda_accelerator import CudaAccelerator from .npu_accelerator import NpuAccelerator __all__ = [ "get_accelerator", "set_accelerator", "auto...

colossalai/accelerator/base_accelerator.py  [truncated: false]
  #!/usr/bin/env python from abc import ABC, abstractmethod from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch __all__ = ["BaseAccelerator"] class BaseAccelerator(ABC): support_set_device: bool = True def __init__(self, name: str, communication_backend: str, is_synchronous: bo...
colossalai/cluster/dist_coordinator.py  [truncated: false]
  import functools import os from contextlib import contextmanager import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.context.singleton_meta import SingletonMeta class DistCoordinator(metaclass=SingletonMeta): """ This class is used to coordinate distributed training. ...

colossalai/cluster/__init__.py  [truncated: false]
  from .device_mesh_manager import DeviceMeshManager from .dist_coordinator import DistCoordinator from .process_group_manager import ProcessGroupManager from .process_group_mesh import ProcessGroupMesh __all__ = ["DistCoordinator", "ProcessGroupManager", "DeviceMeshManager", "ProcessGroupMesh"]

colossalai/cluster/process_group_mesh.py  [truncated: false]
  import gc import itertools from functools import reduce from operator import mul from typing import Dict, List, Optional, Tuple, Union import numpy as np import torch.distributed as dist from torch.distributed import ProcessGroup from torch.distributed.distributed_c10d import GroupMember def prod(nums: List[int]) ->...
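ProcessGroupMesh, previewed above, maps flat ranks onto an N-dimensional mesh of parallel groups. A sketch of the underlying coordinate arithmetic with numpy; the mesh shape and axis interpretation are arbitrary examples, not the class's API.

    import numpy as np

    mesh_shape = (2, 2, 2)  # e.g. (pp, dp, tp) sizes: 8 ranks total

    def rank_to_coord(rank: int):
        # Flat rank -> (i, j, k) position in the mesh.
        return np.unravel_index(rank, mesh_shape)

    def coord_to_rank(coord) -> int:
        # (i, j, k) -> flat rank; the inverse of rank_to_coord.
        return int(np.ravel_multi_index(coord, mesh_shape))

    assert coord_to_rank(rank_to_coord(5)) == 5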
colossalai/cluster/process_group_manager.py  [truncated: false]
  from typing import List import torch.distributed as dist from torch.distributed import ProcessGroup class ProcessGroupManager: """ ProcessGroupManager is used to manage the process groups in the cluster. There are some terms used in this class: - pg: the short name for process group - pg...

colossalai/cluster/device_mesh_manager.py  [truncated: false]
  from dataclasses import dataclass from typing import Dict, List, Tuple, Union import torch import torch.distributed as dist from colossalai.device.alpha_beta_profiler import AlphaBetaProfiler from colossalai.device.device_mesh import DeviceMesh @dataclass class DeviceMeshInfo: """ This class is used to stor...

colossalai/testing/pytest_wrapper.py  [truncated: false]
  """ This file will not be automatically imported by `colossalai.testing` as this file has a dependency on `pytest`. Therefore, you need to explicitly import this file `from colossalai.testing.pytest_wrapper import <func>`.from """ import os def run_on_environment_flag(name: str): """ Conditionally run a test...

colossalai/testing/comparison.py  [truncated: false]
  from typing import Any, List, OrderedDict import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ProcessGroup from torch.testing import assert_close from torch.utils._pytree import tree_flatten def assert_equal(a: Tensor, b: Tensor): assert torch.all(a == b), f"expec...

colossalai/testing/utils.py  [truncated: false]
  import gc import random import re import socket from functools import partial from inspect import signature from typing import Any, Callable, List import torch import torch.multiprocessing as mp from packaging import version from colossalai.accelerator import get_accelerator def parameterize(argument: str, values: ...

colossalai/testing/random.py  [truncated: false]
  import random import numpy as np import torch def seed_all(seed, cuda_deterministic=False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if cuda_deterministic: # slower, ...

colossalai/testing/__init__.py  [truncated: false]
  from .comparison import ( assert_close, assert_close_loose, assert_equal, assert_equal_in_group, assert_hf_output_close, assert_not_equal, check_state_dict_equal, ) from .pytest_wrapper import run_on_environment_flag from .utils import ( DummyDataloader, clear_cache_before_run, f...
colossalai/legacy/constants.py  [truncated: false]
  #!/usr/bin/env python # -*- encoding: utf-8 -*- ALLOWED_MODES = [None, "1d", "2d", "2.5d", "3d", "sequence"] TENSOR_PARALLEL_MODE = "tensor_parallel_mode" # initializer INITIALIZER_MAPPING = { "data": "Initializer_Data", "tensor": "Initializer_Tensor", "pipeline": "Initializer_Pipeline", "embedding": ...

colossalai/legacy/core.py  [truncated: false]
  #!/usr/bin/env python # -*- encoding: utf-8 -*- from colossalai.legacy.context.parallel_context import global_context __all__ = ["global_context"]

colossalai/legacy/global_variables.py  [truncated: false]
  from typing import Optional class TensorParallelEnv(object): _instance = None def __new__(cls, *args, **kwargs): if cls._instance is None: cls._instance = object.__new__(cls, *args, **kwargs) return cls._instance def __init__(self, *args, **kwargs): self.load(*args, *...

colossalai/legacy/__init__.py  [truncated: false]
  from .initialize import ( get_default_parser, initialize, launch, launch_from_openmpi, launch_from_slurm, launch_from_torch, ) __all__ = [ "launch", "launch_from_openmpi", "launch_from_slurm", "launch_from_torch", "initialize", "get_default_parser", ]

colossalai/legacy/initialize.py  [truncated: false]
  #!/usr/bin/env python # -*- encoding: utf-8 -*- import argparse import os import pprint from pathlib import Path from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union import torch import torch.nn as nn from torch.nn.modules.loss import _Loss from torch.nn.parallel import DistributedDataParallel as...

colossalai/legacy/zero/__init__.py  [truncated: false]
  from typing import Tuple import torch import torch.nn as nn from colossalai.logging import get_dist_logger from .init_ctx import ZeroInitContext, no_shard_zero_context, no_shard_zero_decrator from .shard_utils import BucketTensorShardStrategy, TensorShardStrategy from .sharded_model import ShardedModelV2 from .shard...

colossalai/legacy/zero/sharded_param/sharded_tensor.py  [truncated: false]
  import torch from colossalai.legacy.zero.gemini.stateful_tensor import StatefulTensor, TensorState class ShardedTensor(StatefulTensor): def __init__(self, tensor: torch.Tensor, state: TensorState = TensorState.HOLD) -> None: r""" A tensor sharded in multiple processes. Constructed from an existin...

colossalai/legacy/zero/sharded_param/__init__.py  [truncated: false]
  from .sharded_param import ShardedParamV2 from .sharded_tensor import ShardedTensor __all__ = ["ShardedTensor", "ShardedParamV2"]

colossalai/legacy/zero/sharded_param/sharded_param.py  [truncated: false]
  from typing import List, Optional, Tuple import torch from colossalai.legacy.zero.gemini.stateful_tensor import StatefulTensor, TensorState from colossalai.legacy.zero.gemini.tensor_utils import colo_tensor_mem_usage from .sharded_tensor import ShardedTensor EMPTY_TENSOR_DICT = {} def get_empty_tensor(device: tor...

colossalai/legacy/zero/init_ctx/__init__.py  [truncated: false]
  from .init_context import ZeroInitContext, no_shard_zero_context, no_shard_zero_decrator __all__ = ["ZeroInitContext", "no_shard_zero_context", "no_shard_zero_decrator"]

colossalai/legacy/zero/init_ctx/init_context.py  [truncated: false]
  import contextlib import functools from contextlib import AbstractContextManager from dataclasses import dataclass from typing import Optional import torch import torch.distributed as dist import torch.nn as nn from colossalai.context.singleton_meta import SingletonMeta from colossalai.legacy.context.parallel_mode im...

colossalai/legacy/zero/shard_utils/base_shard_strategy.py  [truncated: false]
  from abc import ABC, abstractmethod from typing import List, Optional import torch.distributed as dist from colossalai.legacy.zero.sharded_param.sharded_tensor import ShardedTensor class BaseShardStrategy(ABC): def __init__(self) -> None: """Abstract Shard Strategy. Use to shard a tensors on multiple GP...

colossalai/legacy/zero/shard_utils/bucket_tensor_shard_strategy.py  [truncated: false]
  from typing import List, Optional import torch import torch.distributed as dist from torch._utils import _flatten_dense_tensors as flatten from colossalai.accelerator import get_accelerator from colossalai.legacy.zero.sharded_param.sharded_tensor import ShardedTensor from .tensor_shard_strategy import TensorShardStr...

colossalai/legacy/zero/shard_utils/tensor_shard_strategy.py  [truncated: false]
  from typing import List, Optional import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.legacy.zero.gemini.tensor_utils import colo_model_data_tensor_move_inline from colossalai.legacy.zero.shard_utils import BaseShardStrategy from colossalai.legacy.zero.shar...
colossalai/legacy/zero/shard_utils/commons.py  [truncated: false]
  from typing import Tuple import torch def get_shard(tensor: torch.Tensor, rank: int, world_size: int) -> Tuple[torch.Tensor, int]: """Return the local shard of a full tensor.""" # Shard using torch.chunk to match all-gather/reduce-scatter. chunks = list(torch.flatten(tensor).chunk(world_size)) while ...
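A hedged completion of the get_shard idea cut off in the preview above: flatten, chunk across ranks, and pad with empty chunks when torch.chunk returns fewer than world_size pieces. The padding detail and the num_padding computation are assumptions inferred from the visible fragment, not the file's verified body.

    from typing import Tuple
    import torch

    def get_shard(tensor: torch.Tensor, rank: int, world_size: int) -> Tuple[torch.Tensor, int]:
        # torch.chunk may return fewer than world_size chunks for small tensors.
        chunks = list(torch.flatten(tensor).chunk(world_size))
        while len(chunks) < world_size:          # assumption: pad trailing ranks
            chunks.append(chunks[0].new_empty(0))
        # Elements of padding needed to make every rank's shard chunk-sized.
        num_padding = chunks[0].numel() * world_size - tensor.numel()
        return chunks[rank].clone(), num_padding

    shard, pad = get_shard(torch.arange(10.0), rank=3, world_size=4)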
colossalai/legacy/zero/shard_utils/__init__.py  [truncated: false]
  from .base_shard_strategy import BaseShardStrategy from .bucket_tensor_shard_strategy import BucketTensorShardStrategy from .tensor_shard_strategy import TensorShardStrategy __all__ = ["BaseShardStrategy", "TensorShardStrategy", "BucketTensorShardStrategy"]

colossalai/legacy/zero/sharded_optim/sharded_optim_v2.py  [truncated: false]
  # this code is inspired by the DeepSpeed library and implemented with our own design from scratch from enum import Enum from typing import Dict, Optional, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch.distributed import ProcessGroup from torch.nn.paramete...

colossalai/legacy/zero/sharded_optim/__init__.py  [truncated: false]
  from .sharded_optim_v2 import ShardedOptimizerV2 __all__ = ["ShardedOptimizerV2"]