Dataset schema (per-column statistics as reported by the viewer):

    repo          string, 7-90 chars
    file_url      string, 81-315 chars
    file_path     string, 4-228 chars
    content       string, 0-32.8k chars
    language      string, 1 class
    license       string, 7 classes
    commit_sha    string, 40 chars
    retrieved_at  date, 2026-01-04 14:38:15 to 2026-01-05 02:33:18
    truncated     bool, 2 classes

All rows below share the same values for five of these columns:

    repo          hpcaitech/ColossalAI
    language      python
    license       Apache-2.0
    commit_sha    b1915d2889543949eb5b610241f1515c73df5059
    retrieved_at  2026-01-04T14:40:19.002665Z

Every file_url follows the pattern https://github.com/hpcaitech/ColossalAI/blob/<commit_sha>/<file_path>, so only the file_path is listed per row. truncated is false unless the row is marked [truncated]. Each entry gives the file_path followed by its content preview; a trailing "..." marks where the preview cuts off in the source.
colossalai/_analyzer/_subclasses/flop_tensor.py
    # adopted from https://github.com/facebookresearch/fvcore/blob/main/fvcore/nn/jit_handles.py # ideas from https://pastebin.com/AkvAyJBw # and https://dev-discuss.pytorch.org/t/the-ideal-pytorch-flop-counter-with-torch-dispatch/505 import operator from collections import defaultdict from contextlib import contextmanage...

colossalai/_analyzer/fx/node_util.py
    from dataclasses import dataclass, field from typing import Dict, List, Optional, Tuple, Union import torch from torch.autograd.profiler_util import _format_memory from torch.fx import Node from colossalai._analyzer.envs import MeshConfig def intersect(a, b): return {k: a[k] for k in a if k in b} def subtract...

colossalai/_analyzer/fx/codegen.py
    from typing import Any, Dict, List, Tuple import torch try: from torch.fx.graph import CodeGen except: pass from torch.fx.graph import ( PythonCode, _custom_builtins, _format_target, _is_from_torch, _Namespace, _origin_type_map, _register_custom_builtin, inplace_methods, ma...

colossalai/_analyzer/fx/symbolic_profile.py
    from torch.fx import GraphModule from .passes import ShapeProp, graph_profile_pass, shape_prop_pass from .passes.graph_profile import FlopProfiler def register_flop_count_impl(func): def wrapper(impl): FlopProfiler._custom_flop_count_impl[func] = impl return impl return wrapper def registe...

colossalai/_analyzer/fx/__init__.py
    from .node_util import MetaInfo from .symbolic_profile import symbolic_profile from .tracer.symbolic_trace import symbolic_trace

colossalai/_analyzer/fx/graph_module.py
    import linecache import os import sys import traceback import warnings from pathlib import Path from typing import Any, Dict, Optional, Union import torch import torch.fx import torch.nn as nn from torch.fx.graph import PythonCode try: from torch.fx.graph import _PyTreeCodeGen SUPPORT_PT_CODEGEN = True excep...

colossalai/_analyzer/fx/tracer/tracer.py
    import functools import inspect from contextlib import contextmanager from typing import Any, Callable, Dict, Iterable, Optional, Set, Tuple, Type, Union import torch import torch.nn as nn from torch.fx import Graph, Node, Proxy, Tracer from torch.utils._pytree import tree_map from colossalai._analyzer._subclasses im...

colossalai/_analyzer/fx/tracer/symbolic_trace.py
    from typing import Any, Callable, Dict, Optional, Union import torch from torch.fx import Tracer from torch.utils._pytree import tree_map from colossalai._analyzer._subclasses import MetaTensor try: from ..codegen import ActivationCheckpointCodeGen SUPPORT_ACTIVATION = True except: SUPPORT_ACTIVATION = ...

colossalai/_analyzer/fx/tracer/__init__.py
    from .bias_addition import * from .custom_leaf_module import *

colossalai/_analyzer/fx/tracer/bias_addition.py
    """ If FX.Graph is traced for auto-parallel module, some extra node will be added during graph construction to deal with the compatibility between bias-addition and all-reduce. """ import torch import torch.nn.functional as F from torch.nn.modules.utils import _pair, _single, _triple from .tracer import register_trac...

colossalai/_analyzer/fx/tracer/custom_leaf_module.py
    import torch from .tracer import register_leaf_module, register_leaf_module_impl try: import apex register_leaf_module(apex.normalization.FusedLayerNorm) register_leaf_module(apex.normalization.FusedRMSNorm) register_leaf_module(apex.normalization.MixedFusedLayerNorm) register_leaf_module(apex.no...

colossalai/_analyzer/fx/tracer/proxy.py
    import operator from typing import Any, Callable, Dict, Optional, Union import torch from torch.fx import Node, Proxy from torch.utils._pytree import tree_map from colossalai._analyzer._subclasses import MetaTensor Target = Union[Callable[..., Any], str] class ColoProxy(Proxy): _func_dispatch: Dict[Target, Cal...

colossalai/_analyzer/fx/passes/shape_prop.py
    """``torch.fx.ShapeProp``, but with ``MetaTensor``""" from typing import Any, Callable, Dict, Optional, Tuple, Union import torch import torch.fx from torch.autograd.graph import saved_tensors_hooks from torch.utils._pytree import tree_map from colossalai._analyzer._subclasses import MetaTensor, MetaTensorMode from ...

colossalai/_analyzer/fx/passes/__init__.py
    from .graph_profile import graph_profile_pass from .shape_prop import ShapeProp, shape_prop_pass, sim_env

colossalai/_analyzer/fx/passes/graph_profile.py
    from typing import Any, Dict, Iterator, List, Optional, Tuple import torch import torch.fx from torch.autograd.profiler_util import _format_memory from torch.fx import GraphModule from torch.fx.node import Argument, Node, Target from colossalai._analyzer._subclasses import flop_count from colossalai._analyzer.fx.node...

colossalai/device/device_mesh.py
    """This code is adapted from Alpa https://github.com/alpa-projects/alpa/ with some changes. """ import operator from dataclasses import dataclass from functools import reduce from typing import Dict, List, Union import torch import torch.distributed as dist from torch.distributed import ProcessGroup @datacla...

colossalai/device/__init__.py
    from .alpha_beta_profiler import AlphaBetaProfiler from .calc_pipeline_strategy import alpa_dp __all__ = ["AlphaBetaProfiler", "alpa_dp"]

colossalai/device/alpha_beta_profiler.py
    import math import time from typing import Dict, List, Tuple import torch import torch.distributed as dist from colossalai.logging import get_dist_logger GB = int((1 << 30)) BYTE = 4 FRAMEWORK_LATENCY = 0 class AlphaBetaProfiler: """ Profile alpha and beta value for a given device list. Usage: ...

colossalai/device/calc_pipeline_strategy.py
    from math import pow import numpy as np def get_submesh_choices(num_hosts, num_devices_per_host, mode="new"): submesh_choices = [] i = 1 p = -1 while i <= num_devices_per_host: i *= 2 p += 1 assert pow(2, p) == num_devices_per_host, ( "Only supports the cases where num_dev...

colossalai/quantization/bnb.py
    # adapted from Hugging Face accelerate/utils/bnb.py accelerate/utils/modeling.py import importlib.metadata import logging import torch import torch.nn as nn from packaging.version import Version from .bnb_config import BnbQuantizationConfig try: import bitsandbytes as bnb try: # in case lower versi...

colossalai/quantization/utils.py
    import torch import torch.distributed as dist from packaging import version from torch import Tensor from torch.distributed.fsdp._common_utils import _no_dispatch_record_stream from torch.distributed.utils import _p_assert def _all_gather_flat_param( self, padded_unsharded_flat_param: Tensor, ) -> Tensor: ...

colossalai/quantization/fp8.py
    import os from typing import Any, Optional, Tuple import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from packaging.version import Version from torch.distributed import ReduceOp from .fp8_config import dynamic_kernel SUPPORT_TORCH_COMPILE = Version(torch.__version__) >= ...

colossalai/quantization/__init__.py
    from .bnb import quantize_model from .bnb_config import BnbQuantizationConfig __all__ = [ "BnbQuantizationConfig", "quantize_model", ]

colossalai/quantization/fp8_config.py
    dynamic_kernel: bool = False

colossalai/quantization/bnb_config.py
    # adapted from Hugging Face accelerate/utils/dataclasses.py import warnings from dataclasses import dataclass, field from typing import List import torch @dataclass class BnbQuantizationConfig: """ A plugin to enable BitsAndBytes 4bit and 8bit quantization """ load_in_8bit: bool = field(default=Fal...
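The two exports above (quantize_model from bnb.py, BnbQuantizationConfig from bnb_config.py) form the package's public quantization entry point. A minimal usage sketch, assuming quantize_model takes (model, config) in that order and that bitsandbytes is installed; the layer sizes are illustrative:

    import torch.nn as nn
    from colossalai.quantization import BnbQuantizationConfig, quantize_model

    model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 8))
    # load_in_8bit is a field visible in the bnb_config.py preview above
    bnb_config = BnbQuantizationConfig(load_in_8bit=True)
    model = quantize_model(model, bnb_config)  # assumed argument order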
colossalai/quantization/fp8_hook.py
    import torch.nn.functional as F from colossalai.quantization.fp8 import linear_fp8 from colossalai.tensor.param_op_hook import ColoParamOpHook class FP8Hook(ColoParamOpHook): def pre_forward(self, params) -> None: pass def post_forward(self, params) -> None: pass def pre_backward(self, ...

colossalai/zero/wrapper.py
    from copy import copy from typing import Dict, Optional import torch import torch.nn as nn from .gemini import GeminiDDP def zero_model_wrapper( model: nn.Module, zero_stage: int = 1, gemini_config: Optional[Dict] = None, verbose: bool = False ): """This wrapper function is used to wrap your training model ...
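zero/wrapper.py's preview shows the full signature zero_model_wrapper(model, zero_stage=1, gemini_config=None, verbose=False). A sketch of the call, assuming a distributed environment has already been initialized (e.g. via colossalai's launch helpers); the gemini_config contents are not taken from the source:

    import torch.nn as nn
    from colossalai.zero import zero_model_wrapper

    model = nn.Linear(8, 8)
    z1_model = zero_model_wrapper(model, zero_stage=1)   # ZeRO-1, no Gemini config needed
    # stage 3 routes through GeminiDDP; valid config keys depend on GeminiDDP
    z3_model = zero_model_wrapper(model, zero_stage=3, gemini_config={})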
colossalai/zero/__init__.py
    from .gemini import GeminiAdamOptimizer, GeminiDDP, GeminiOptimizer, get_static_torch_model from .low_level import LowLevelZeroOptimizer from .wrapper import zero_model_wrapper, zero_optim_wrapper __all__ = [ "GeminiDDP", "GeminiOptimizer", "GeminiAdamOptimizer", "zero_model_wrapper", "zero_optim_w...

colossalai/zero/low_level/low_level_optim.py  [truncated]
    # this code is inspired by the DeepSpeed library and implemented with our own design from scratch import copy from contextlib import contextmanager, nullcontext from functools import partial from typing import Dict, Iterator, List, Optional, Tuple, Union from weakref import proxy import torch import torch.distributed ...

colossalai/zero/low_level/zero_hook.py
    from typing import List from torch._tensor import Tensor from colossalai.tensor.param_op_hook import ColoParamOpHook _ALL_GATHER_HANDLE = "_all_gather_handle" def wait_all_gather_handle(p): if hasattr(p, _ALL_GATHER_HANDLE): handle = getattr(p, _ALL_GATHER_HANDLE) handle.wait() delattr(...
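The handle-stashing pattern in zero_hook.py is fully visible above: an async all-gather handle is stored as an attribute on the parameter, then waited on and removed. A self-contained re-creation of that pattern, with a dummy handle standing in for a torch.distributed async work object:

    import torch

    _ALL_GATHER_HANDLE = "_all_gather_handle"

    class _DummyHandle:
        def wait(self):  # stands in for an async collective's work handle
            pass

    def wait_all_gather_handle(p):
        # same logic as the preview: wait once, then drop the attribute
        if hasattr(p, _ALL_GATHER_HANDLE):
            getattr(p, _ALL_GATHER_HANDLE).wait()
            delattr(p, _ALL_GATHER_HANDLE)

    p = torch.nn.Parameter(torch.zeros(4))
    setattr(p, _ALL_GATHER_HANDLE, _DummyHandle())
    wait_all_gather_handle(p)
    assert not hasattr(p, _ALL_GATHER_HANDLE)  # handle consumed exactly once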
colossalai/zero/low_level/__init__.py
    from .low_level_optim import LowLevelZeroOptimizer __all__ = ["LowLevelZeroOptimizer"]

colossalai/zero/low_level/_utils.py
    import math from typing import Optional, Tuple, Union import numpy as np import torch import torch.distributed as dist from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors def flatten(input_): return _flatten_dense_tensors(input_) def unflatten(flat, tensors): return _unflatten_dense_t...
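_utils.py wraps torch's private flatten helpers, which ZeRO uses to pack many parameter tensors into one contiguous communication buffer. A round-trip demo of those two helpers:

    import torch
    from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

    tensors = [torch.randn(2, 3), torch.randn(5)]
    flat = _flatten_dense_tensors(tensors)               # one contiguous 1-D buffer
    restored = _unflatten_dense_tensors(flat, tensors)   # original shapes restored
    for original, roundtripped in zip(tensors, restored):
        assert torch.equal(original, roundtripped)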
colossalai/zero/low_level/bookkeeping/bucket_store.py
    from typing import Dict import torch from torch import Tensor from torch._utils import _flatten_dense_tensors from torch.distributed import ProcessGroup from colossalai.accelerator.api import get_accelerator from .base_store import BaseStore class BucketStore(BaseStore): def __init__( self, tor...

colossalai/zero/low_level/bookkeeping/base_store.py
    from typing import Tuple, Union import numpy as np import torch.distributed as dist from torch.distributed import ProcessGroup class BaseStore: def __init__(self, torch_pg: Union[ProcessGroup, Tuple[ProcessGroup, ...]]): if isinstance(torch_pg, tuple): self.sizes = [dist.get_world_size(group=...

colossalai/zero/low_level/bookkeeping/gradient_store.py
    from typing import List, Optional from torch import Tensor from .base_store import BaseStore class GradientStore(BaseStore): def __init__(self, *args, partition_grad: bool = False): super().__init__(*args) """ self._grads_of_params mapping the parameter and its gradient slices da...

colossalai/zero/low_level/bookkeeping/tensor_bucket.py
    from typing import Optional import numpy as np import torch import torch.distributed as dist from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from colossalai.quantization.fp8 import all_gather_fp8 from colossalai.zero.low_level._utils import all_gather_into_flat_tensor_nd class TensorBucket...

colossalai/zero/low_level/bookkeeping/__init__.py
    from .bucket_store import BucketStore from .gradient_store import GradientStore from .tensor_bucket import TensorBucket __all__ = ["GradientStore", "BucketStore", "TensorBucket"]

colossalai/zero/gemini/placement_policy.py
    import functools import warnings from abc import ABC, abstractmethod from time import time from typing import Dict, List, Optional, Tuple, Type import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.zero.gemini.chunk import Chunk from .chunk import Chunk, Chu...

colossalai/zero/gemini/gemini_hook.py
    from contextlib import contextmanager from enum import Enum from functools import partial from typing import List import torch from colossalai.accelerator import get_accelerator from colossalai.tensor.param_op_hook import ColoParamOpHook from colossalai.utils import is_ddp_ignored from colossalai.zero.gemini import T...

colossalai/zero/gemini/gemini_ddp.py  [truncated]
    import itertools from collections import OrderedDict from contextlib import nullcontext from functools import partial from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from torch....

colossalai/zero/gemini/utils.py
    from collections import OrderedDict from copy import copy from typing import Optional, Set import torch import torch.distributed as dist import torch.nn as nn from colossalai.accelerator import get_accelerator from .chunk import Chunk def get_temp_total_chunk_on_cuda(chunk: Chunk, dtype: torch.dtype): if chunk...

colossalai/zero/gemini/gemini_mgr.py
    import functools from time import time from typing import Dict, Iterable, List, Optional, Tuple import torch import torch.distributed as dist from .chunk import Chunk, ChunkManager from .memory_tracer import ChunkMemStatsCollector, MemStats from .placement_policy import PlacementPolicy, PlacementPolicyFactory class...

colossalai/zero/gemini/__init__.py
    from .chunk import ChunkManager, TensorInfo, TensorState, search_chunk_configuration from .gemini_ddp import GeminiDDP from .gemini_mgr import GeminiManager from .gemini_optimizer import GeminiAdamOptimizer, GeminiOptimizer from .utils import get_static_torch_model __all__ = [ "GeminiManager", "TensorInfo", ...

colossalai/zero/gemini/gemini_optimizer.py  [truncated]
    # this code is inspired by the DeepSpeed library and implemented with our own design from scratch import copy import math from typing import Any, Dict, Iterator, Optional, OrderedDict, Set, Tuple, Union import torch import torch.distributed as dist from packaging.version import Version from torch.distributed import Pr...

colossalai/zero/gemini/chunk/chunk.py
    from dataclasses import dataclass from enum import Enum from typing import Dict, List, Optional import torch import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.accelerator import get_accelerator from colossalai.quantization.fp8 import all_gather_fp8 class TensorState(Enum): ...

colossalai/zero/gemini/chunk/utils.py
    from time import time from typing import Optional import torch import torch.distributed as dist import torch.nn as nn from .manager import ChunkManager from .search_utils import search_chunk_configuration def safe_div(a, b): if a == 0: return 0 return a / b def init_chunk_manager( model: nn.Mo...

colossalai/zero/gemini/chunk/search_utils.py
    import math from typing import Dict, List, Optional, Tuple import numpy as np import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from colossalai.tensor import ColoParameter from colossalai.utils import is_ddp_ignored from colossalai.zero.gemini.memory_tracer import MemSt...

colossalai/zero/gemini/chunk/__init__.py
    from .chunk import Chunk, ChunkFullError, TensorInfo, TensorState from .manager import ChunkManager from .search_utils import classify_params_by_dp_degree, search_chunk_configuration from .utils import init_chunk_manager __all__ = ["Chunk", "ChunkManager", "classify_params_by_dp_degree", "search_chunk_configuration", ...

colossalai/zero/gemini/chunk/manager.py
    from collections import deque from typing import Deque, Dict, Iterable, List, Optional, Set, Tuple import torch import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.accelerator import get_accelerator from colossalai.utils import free_storage from .chunk import Chunk, ChunkFullE...

colossalai/zero/gemini/memory_tracer/chunk_memstats_collector.py
    from typing import Optional from colossalai.accelerator import get_accelerator from colossalai.zero.gemini.chunk import ChunkManager from .memory_stats import MemStats from .memstats_collector import MemStatsCollector class ChunkMemStatsCollector(MemStatsCollector): def __init__(self, chunk_manager: ChunkManage...

colossalai/zero/gemini/memory_tracer/memory_monitor.py
    import json from abc import abstractmethod from concurrent.futures import ThreadPoolExecutor from time import sleep, time import torch from colossalai.accelerator import get_accelerator class MemoryMonitor: """Base class for all types of memory monitor. All monitors should have a list called `time_stamps` a...

colossalai/zero/gemini/memory_tracer/memstats_collector.py
    import time from typing import Optional from .memory_monitor import SyncCudaMemoryMonitor from .memory_stats import MemStats class MemStatsCollector: """ A Memory statistic collector. It works in two phases. Phase 1. Collection Phase: collect memory usage statistics of CPU and GPU. The first iter...

colossalai/zero/gemini/memory_tracer/utils.py
    from typing import Optional, Tuple import torch def colo_model_optimizer_usage(optim) -> Tuple[int, int]: """Trace the optimizer memory usage Args: optim (ShardedOptimV2): an instance of ShardedOptimizer Returns: Tuple[int, int]: cuda/cpu memory usage in Byte """ if optim is Non...

colossalai/zero/gemini/memory_tracer/static_memstats_collector.py
    from typing import Optional import torch import torch.nn as nn from torch.fx import symbolic_trace from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.fx.profiler import calculate_fwd_out, calculate_fwd_tmp, is_compatible_with_meta from colossalai.zero.gemini.chunk import ChunkManager if is_...

colossalai/zero/gemini/memory_tracer/param_runtime_order.py
    from abc import ABC import torch class ParamGenerator(ABC): def append(self, param: torch.nn.Parameter): pass def generate(self): pass def clear(self): pass class OrderedParamGenerator(ParamGenerator): """OrderedParamGenerator Contain the order of parameters visited d...
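param_runtime_order.py defines a ParamGenerator ABC (append/generate/clear) and an OrderedParamGenerator that records the order in which parameters are visited. One plausible implementation consistent with that interface; the internals here are hypothetical, not read from the source:

    import torch

    class OrderedParamGenerator:
        def __init__(self) -> None:
            self.param_visited_order = []

        def append(self, param: torch.nn.Parameter):
            self.param_visited_order.append(param)

        def generate(self):
            # yield each parameter once, in first-visit order
            seen = set()
            for p in self.param_visited_order:
                if id(p) not in seen:
                    seen.add(id(p))
                    yield p

        def clear(self):
            self.param_visited_order = []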
colossalai/zero/gemini/memory_tracer/__init__.py
    from .param_runtime_order import OrderedParamGenerator # isort:skip from .memory_stats import MemStats # isort:skip from .memory_monitor import AsyncMemoryMonitor, SyncCudaMemoryMonitor # isort:skip from .memstats_collector import MemStatsCollector # isort:skip from .chunk_memstats_collector import ChunkMemStatsCol...

colossalai/zero/gemini/memory_tracer/runtime_mem_tracer.py
    import torch.nn from colossalai.tensor.param_op_hook import ColoParamOpHookManager from colossalai.utils import _cast_float from .memory_stats import MemStats __all__ = ["RuntimeMemTracer"] class RuntimeMemTracer: """RuntimeMemTracer for the module training using ColoParameter. Trace non-model memory usag...

colossalai/zero/gemini/memory_tracer/memory_stats.py
    from typing import List, Optional import torch from .param_runtime_order import OrderedParamGenerator class MemStats(object): def __init__(self) -> None: """ Store the non model data statistics used for Gemini and GeminiOptimizer. """ # (preop_step, List[param]) self._ste...

colossalai/autochunk/estimate_memory.py
    from typing import Dict, List import torch from torch.fx.node import Node from .utils import NodeMgr, get_node_shape, is_non_memory_node class EstimateMemory(object): """ Estimate memory with chunk """ def __init__(self) -> None: pass def _get_node_size(self, x: Node) -> float: ...

colossalai/autochunk/search_chunk.py
    import copy from typing import Dict, List, Tuple from torch.fx.node import Node from .estimate_memory import EstimateMemory from .reorder_graph import ReorderGraph from .select_chunk import SelectChunk from .trace_flow import TraceFlow from .trace_indice import TraceIndice from .utils import NodeMgr, get_logger, is_n...

colossalai/autochunk/trace_flow.py
    from typing import Dict, List, Tuple from torch.fx.node import Node from .trace_indice import TraceIndice from .utils import ( NodeMgr, find_chunk_all_input_nodes, find_chunk_compute_input_and_output_nodes, find_tensor_shape_node, flat_list, get_node_name, get_node_shape, is_non_comput...

colossalai/autochunk/autochunk_codegen.py
    from typing import Any, Callable, Dict, Iterable, List, Tuple import torch import colossalai from colossalai.fx._compatibility import is_compatible_with_meta from colossalai.fx.codegen.activation_checkpoint_codegen import CODEGEN_AVAILABLE AUTOCHUNK_AVAILABLE = CODEGEN_AVAILABLE and is_compatible_with_meta() if AUT...

colossalai/autochunk/utils.py
    from typing import Any, Dict, List, Union from torch.fx.node import Node from colossalai.logging import get_dist_logger NON_COMPUTE_OP = ["placeholder", "get_attr", "output"] NON_COMPUTE_NAME = ["getattr", "eq", "_assert_is_none", "_assert", "finfo", "size"] logger = get_dist_logger() class NodeMgr(object): de...

colossalai/autochunk/reorder_graph.py
    from .trace_indice import TraceIndice from .utils import NodeMgr class ReorderGraph(object): """ Reorder node list and indice trace list """ def __init__(self, trace_indice: TraceIndice, node_mgr: NodeMgr) -> None: self.trace_indice = trace_indice self.node_mgr = node_mgr self...

colossalai/autochunk/trace_indice.py  [truncated]
    import copy from typing import Dict, List from torch.fx.node import Node from .utils import NodeMgr, find_first_tensor_arg, flat_list, get_module_node_name, get_node_name, get_node_shape class TraceIndice(object): """ Trace all indice information for every node. Indice is a logical concept. Equal dims ...

colossalai/autochunk/select_chunk.py
    from .estimate_memory import EstimateMemory from .reorder_graph import ReorderGraph from .trace_indice import TraceIndice from .utils import NodeMgr, is_non_compute_node class SelectChunk(object): def __init__( self, trace_indice: TraceIndice, estimate_memory: EstimateMemory, reord...

colossalai/interface/pretrained.py
    from typing import Optional from torch.nn import Module __all__ = [ "get_pretrained_path", "set_pretrained_path", ] def get_pretrained_path(model: Module) -> Optional[str]: return getattr(model, "_pretrained", None) def set_pretrained_path(model: Module, path: str) -> None: setattr(model, "_pretra...
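interface/pretrained.py is shown essentially in full: the two helpers stash and read a "_pretrained" attribute on a module. A quick demonstration (the path string is illustrative):

    import torch.nn as nn
    from colossalai.interface.pretrained import get_pretrained_path, set_pretrained_path

    model = nn.Linear(4, 4)
    set_pretrained_path(model, "checkpoints/model.bin")
    assert get_pretrained_path(model) == "checkpoints/model.bin"
    assert get_pretrained_path(nn.Linear(2, 2)) is None  # unset defaults to None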
colossalai/interface/model.py
    import re from typing import Dict, Set import torch import torch.nn as nn from peft import PeftModel, PeftType def extract_lora_layers(model: PeftModel, names: Set[str], adapter_name: str = "default"): config = model.peft_config[adapter_name] if config.peft_type != PeftType.LORA: raise ValueError(f"A...

colossalai/interface/optimizer.py
    from typing import Dict, Optional, Union import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch.optim import Optimizer class OptimizerWrapper: """ A standard interface for optimizers wrapped by the Booster. Args: optim (Optimizer): The optimizer t...
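OptimizerWrapper's docstring calls it "a standard interface for optimizers wrapped by the Booster". A sketch assuming it mirrors the torch Optimizer surface (zero_grad/step), which is what a drop-in wrapper implies:

    import torch
    import torch.nn as nn
    from colossalai.interface import OptimizerWrapper

    model = nn.Linear(4, 1)
    optim = OptimizerWrapper(torch.optim.SGD(model.parameters(), lr=0.1))

    loss = model(torch.randn(2, 4)).sum()
    optim.zero_grad()
    loss.backward()
    optim.step()   # delegates to the wrapped SGD step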
colossalai/interface/__init__.py
    from .model import AMPModelMixin, ModelWrapper from .optimizer import OptimizerWrapper __all__ = ["OptimizerWrapper", "ModelWrapper", "AMPModelMixin"]

colossalai/logging/logger.py
    #!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect import logging from pathlib import Path from typing import List, Union import torch.distributed as dist class DistributedLogger: """This is a distributed event logger class essentially based on :class:`logging`. Args: name (str): The na...

colossalai/logging/__init__.py
    import logging from typing import List, Optional from .logger import DistributedLogger __all__ = ["get_dist_logger", "DistributedLogger", "disable_existing_loggers"] def get_dist_logger(name: str = "colossalai") -> DistributedLogger: """Get logger instance based on name. The DistributedLogger will create single...
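get_dist_logger's default name "colossalai" is visible in the preview, and DistributedLogger is described as a distributed event logger built on :class:`logging`. A usage sketch; the ranks= keyword for per-rank filtering is an assumption about DistributedLogger's logging methods:

    from colossalai.logging import get_dist_logger

    logger = get_dist_logger()               # shared "colossalai" logger
    logger.info("training started")
    logger.info("rank-0 only message", ranks=[0])  # assumed filter kwarg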
colossalai/lazy/lazy_init.py
    from types import MethodType from typing import Callable, Optional, Union import torch import torch.nn as nn from packaging import version from torch import Tensor from torch.nn import Parameter from torch.utils._pytree import tree_map from colossalai.logging import get_dist_logger from .construction import Construc...

colossalai/lazy/pretrained.py
    import copy import os from typing import Callable, Optional, Union import torch from torch.nn import Module from colossalai.interface import pretrained as pretrained_interface class PretrainedManager: old_from_pretrained: Optional[Callable] = None @staticmethod def inject() -> None: try: ...

colossalai/lazy/construction.py
    from contextlib import contextmanager from typing import Callable, Dict, Tuple import torch __all__ = [ "_LEGACY_TENSOR_CONSTRUCTOR", "_NO_META_FACTORY", "_NORMAL_FACTORY", "ConstructorManager", ] # reference: https://pytorch.org/cppdocs/notes/tensor_creation.html _NORMAL_FACTORY = [ "arange", ...

colossalai/lazy/__init__.py
    from .lazy_init import LazyInitContext, LazyTensor __all__ = [ "LazyInitContext", "LazyTensor", ]
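The lazy package exports LazyInitContext and LazyTensor. A sketch of the intended flow, deferring parameter allocation at construction time; the materialize step is an assumption about the public API:

    import torch.nn as nn
    from colossalai.lazy import LazyInitContext

    with LazyInitContext():
        model = nn.Linear(4096, 4096)   # parameters are lazy, no real allocation yet

    model = LazyInitContext.materialize(model)  # assumed API: allocate for real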
colossalai/kernel/__init__.py
    (empty file)

colossalai/kernel/kernel_loader.py
    import warnings from typing import List from .extensions import ( CpuAdamArmExtension, CpuAdamX86Extension, FlashAttentionDaoCudaExtension, FlashAttentionNpuExtension, FlashAttentionSdpaCudaExtension, FusedOptimizerCudaExtension, InferenceOpsCudaExtension, LayerNormCudaExtension, Mo...

colossalai/kernel/jit/option.py
    import torch from colossalai.accelerator import get_accelerator from .bias_dropout_add import bias_dropout_add_fused_train from .bias_gelu import bias_gelu_impl JIT_OPTIONS_SET = False def set_jit_fusion_options(): """Set PyTorch JIT layer fusion options.""" # LSG: the latest pytorch and CUDA versions may ...

colossalai/kernel/jit/bias_dropout_add.py
    import torch def bias_dropout_add(x, bias, residual, prob, training): # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor out = torch.nn.functional.dropout(x + bias, p=prob, training=training) out = residual + out return out @torch.jit.script def bias_dropout_add_fused_train( x: torch.Tensor...
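The unfused reference bias_dropout_add is shown in full in the preview, and the file then defines a @torch.jit.script fused training variant. A self-contained re-creation of that pair (the fused body below is inferred from the reference implementation, not copied from the source):

    import torch

    def bias_dropout_add(x, bias, residual, prob, training):
        # reference (eager) version, as in the preview
        out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
        return residual + out

    @torch.jit.script
    def bias_dropout_add_fused_train(
        x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
    ) -> torch.Tensor:
        # TorchScript can fuse the add + dropout + add chain into fewer kernels
        return residual + torch.nn.functional.dropout(x + bias, p=prob, training=True)

    x, bias, residual = torch.randn(3, 4), torch.randn(4), torch.randn(3, 4)
    out = bias_dropout_add_fused_train(x, bias, residual, 0.1)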
colossalai/kernel/jit/bias_gelu.py
    # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import torch ###### BIAS GELU FUSION/ NO AUTOGRAD ################ # 1/sqrt(2*pi)-> 0.3989423 # 1/sqrt(2) -> 0.70710678 # sqrt(2/pi) -> 0.79788456 # this function is tanh approximation of gelu # actual gelu is: # x * 0.5 * (1.0 + torch.erf(x * 0.7071067...
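bias_gelu.py's comments give the constants (sqrt(2/pi) -> 0.79788456, 1/sqrt(2) -> 0.70710678) and note that the kernel uses the tanh approximation of GELU rather than the exact erf form. A quick comparison of the two; the 0.044715 cubic coefficient is the standard tanh-approximation constant, not visible in the preview:

    import torch

    def gelu_erf(x):
        # exact gelu, as quoted in the file's comments
        return x * 0.5 * (1.0 + torch.erf(x * 0.70710678))

    def gelu_tanh(x):
        # tanh approximation with sqrt(2/pi) ~= 0.79788456
        return 0.5 * x * (1.0 + torch.tanh(0.79788456 * (x + 0.044715 * x * x * x)))

    x = torch.linspace(-3, 3, 61)
    print((gelu_erf(x) - gelu_tanh(x)).abs().max())  # approximation error is tiny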
colossalai/kernel/jit/__init__.py
    from .bias_dropout_add import bias_dropout_add_fused_inference, bias_dropout_add_fused_train from .bias_gelu import bias_gelu_impl from .option import set_jit_fusion_options __all__ = [ "bias_dropout_add_fused_train", "bias_dropout_add_fused_inference", "bias_gelu_impl", "set_jit_fusion_options", ]

colossalai/kernel/triton/fused_rotary_embedding.py
    import torch import triton import triton.language as tl @triton.jit def fused_rotary_emb( q, k, cos_cache, sin_cache, cumsum_lengths, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_dim_stride, q_total_tokens, ...

colossalai/kernel/triton/rotary_cache_copy.py
    import torch import triton import triton.language as tl @triton.jit def prefill_cache_kernel( cos_cache, sin_cache, cumsum_lengths, cos_output, sin_output, cache_stride, hidden_stride, total_length, HIDDEN_DIM: tl.constexpr, N_ELEMENTS: tl.constexpr, BLOCK_SIZE: tl.constexp...

colossalai/kernel/triton/llama_act_combine_kernel.py
    from functools import reduce from typing import Any, Tuple import torch from torch import Tensor from torch.cuda.amp import custom_bwd, custom_fwd try: import triton import triton.language as tl HAS_TRITON = True except ImportError: HAS_TRITON = False print("please install triton from https://git...

colossalai/kernel/triton/qkv_matmul_kernel.py
    try: import triton import triton.language as tl HAS_TRITON = True except ImportError: HAS_TRITON = False print("please install triton from https://github.com/openai/triton") if HAS_TRITON: """ this kernel function is modified from https://triton-lang.org/main/getting-started/tutorials/03-...

colossalai/kernel/triton/softmax.py
    import torch try: import triton import triton.language as tl HAS_TRITON = True except ImportError: HAS_TRITON = False print("please install triton from https://github.com/openai/triton") if HAS_TRITON: """ softmax kernel is modified based on https://github.com/openai/triton/blob/34817...

colossalai/kernel/triton/kvcache_copy.py
    import torch import triton import triton.language as tl # Triton 2.1.0 # supports two types of cache layouts # 1. [num_blocks, num_kv_heads, block_size, head_dim] # 2. [num_blocks, num_kv_heads, head_dim // x, block_size, x] @triton.jit def _copy_to_kcache_seqlen_n_kernel( K, # K or V KCache, # [num_blocks,...

colossalai/kernel/triton/no_pad_rotary_embedding.py
    import warnings from typing import Optional import torch import triton import triton.language as tl """ # Base autotune if needed @triton.autotune( configs=[ triton.Config({'BLOCK_HEAD':4,"BLOCK_TOKENS":4,},num_warps=4), triton.Config({'BLOCK_HEAD':4,"BLOCK_TOKENS":8,},num_warps=8), triton...

colossalai/kernel/triton/rms_layernorm.py
    try: import triton import triton.language as tl HAS_TRITON = True except ImportError: HAS_TRITON = False print("please install triton from https://github.com/openai/triton") if HAS_TRITON: # CREDITS: These functions are adapted from the Triton tutorial # https://triton-lang.org/main/gettin...

colossalai/kernel/triton/context_attn_unpad.py
    # Applying the FlashAttention V2 as described in: # "FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning" # by Tri Dao, 2023 # https://github.com/Dao-AILab/flash-attention # # Inspired and modified from Triton Tutorial - Fused Attention # https://triton-lang.org/main/getting-started/tutoria...

colossalai/kernel/triton/__init__.py
    try: import triton HAS_TRITON = True except ImportError: HAS_TRITON = False print("Triton is not installed. Please install Triton to use Triton kernels.") # There may exist import error even if we have triton installed. if HAS_TRITON: from .context_attn_unpad import context_attention_unpadded ...

colossalai/kernel/triton/flash_decoding.py
    # Applying Flash-Decoding as descibed in # https://pytorch.org/blog/flash-decoding/ # by Tri Dao, 2023 import torch import triton import triton.language as tl # Triton 2.1.0 @triton.jit def _flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_s...

colossalai/_C/__init__.py
    (empty file)

colossalai/booster/accelerator.py
    import torch import torch.nn as nn __all__ = ["Accelerator"] _supported_devices = [ "cpu", "cuda", # To be supported # 'xpu', # 'npu', # 'tpu', ] class Accelerator: """ Accelerator is an abstraction for the hardware device that is used to run the model. Args: device (str...

colossalai/booster/booster.py
    from contextlib import contextmanager from typing import Any, Callable, Dict, Iterator, List, Optional, Union import torch import torch.nn as nn from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils.data import DataLoader from colossalai.logging import get...

colossalai/booster/__init__.py
    from .accelerator import Accelerator from .booster import Booster from .plugin import Plugin
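booster/__init__.py exposes the Booster entry point that the plugin files below implement. A sketch of the boost() flow under torchrun, hedged as an assumption about the call order and the launch helper:

    import torch
    import torch.nn as nn
    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import TorchDDPPlugin

    colossalai.launch_from_torch()    # assumed: sets up the dist env under torchrun

    model = nn.Linear(8, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    criterion = nn.CrossEntropyLoss()

    booster = Booster(plugin=TorchDDPPlugin())
    # boost() returns (model, optimizer, criterion, dataloader, lr_scheduler)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)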
colossalai/booster/plugin/low_level_zero_plugin.py
    import enum import os from contextlib import nullcontext from functools import partial from pathlib import Path from types import MethodType from typing import Callable, Dict, Iterator, List, Optional, Tuple import torch import torch.distributed import torch.distributed as dist import torch.nn as nn from torch.distrib...

colossalai/booster/plugin/torch_fsdp_plugin.py
    import os from pathlib import Path from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple import torch import torch.nn as nn from packaging import version from torch.distributed import ProcessGroup if version.parse(torch.__version__) >= version.parse("1.12.0"): from torch.distributed.fs...

colossalai/booster/plugin/torch_ddp_plugin.py
    from typing import Callable, Dict, Iterator, List, Optional, Tuple, Union import torch import torch.nn as nn from peft import PeftModel from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils._pytre...