Dataset Viewer
Auto-converted to Parquet
repository
stringclasses
166 values
file_path
stringlengths
6
125
url
stringlengths
89
210
code
stringlengths
413
290k
chunk
stringlengths
56
175k
lucidrains/lion-pytorch
lion_pytorch/triton.py
https://github.com/lucidrains/lion-pytorch/blob/6a74fdc0ba572ab5683dc0270c66c20ecbc02d09/lion_pytorch/triton.py
import torch try: import triton import triton.language as tl except ImportError as e: print('triton is not installed, please install by running `pip install triton>=2.2.0`') exit() # triton cuda kernel @triton.autotune(configs = [ triton.Config({'BLOCK_SIZE': 128}, num_warps = 4), triton.Conf...
@triton.jit def update_fn_kernel( p_ptr, grad_ptr, exp_avg_ptr, lr, wd, beta1, beta2, n_elements, BLOCK_SIZE: tl.constexpr, ): pid = tl.program_id(axis = 0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements ...
jax-ml/jax-triton
examples/add.py
https://github.com/jax-ml/jax-triton/blob/9aff06677a24d07e510f3632532a88b6804324dc/examples/add.py
# Copyright 2024 The jax_triton Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to ...
@triton.jit def add_kernel( x_ptr, y_ptr, output_ptr, block_size: tl.constexpr, ): """Adds two vectors.""" pid = tl.program_id(axis=0) block_start = pid * block_size offsets = block_start + tl.arange(0, block_size) mask = offsets < 8 x = tl.load(x_ptr + offsets, mask=mask) y = tl.load(y_p...
josStorer/RWKV-Runner
finetune/lora/v6/fla/ops/hgrn/chunk.py
https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from ...
@triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) ...
josStorer/RWKV-Runner
finetune/lora/v6/fla/ops/hgrn/chunk.py
https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from ...
@triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT))...
josStorer/RWKV-Runner
finetune/lora/v6/fla/ops/hgrn/chunk.py
https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from ...
@triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * B...
josStorer/RWKV-Runner
finetune/lora/v6/fla/ops/hgrn/chunk.py
https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from ...
@triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in r...
INT-FlashAttention2024/INT-FlashAttention
flash_atten_full_int8.py
https://github.com/INT-FlashAttention2024/INT-FlashAttention/blob/7f7bfb00bcd26b2cef49e7783f51ef610e05abf7/flash_atten_full_int8.py
import pytest import torch import triton import triton.language as tl from configs import * @triton.jit def _attn_fwd_inner_full_int8(acc, l_i, m_i, q, # K_block_ptr, V_block_ptr, # q_scale, K_block_scale_ptr, v_scale,# start_m, qk_scale, # ...
@triton.jit def _attn_fwd_inner_full_int8(acc, l_i, m_i, q, # K_block_ptr, V_block_ptr, # q_scale, K_block_scale_ptr, v_scale,# start_m, qk_scale, # BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, # ...
INT-FlashAttention2024/INT-FlashAttention
flash_atten_full_int8.py
https://github.com/INT-FlashAttention2024/INT-FlashAttention/blob/7f7bfb00bcd26b2cef49e7783f51ef610e05abf7/flash_atten_full_int8.py
import pytest import torch import triton import triton.language as tl from configs import * @triton.jit def _attn_fwd_inner_full_int8(acc, l_i, m_i, q, # K_block_ptr, V_block_ptr, # q_scale, K_block_scale_ptr, v_scale,# start_m, qk_scale, # ...
@triton.jit def _attn_fwd_full_int8(Q, K, V, Q_scale, K_scale, V_scale, sm_scale, Out, # stride_qz, stride_qh, stride_qm, stride_qk, # stride_kz, stride_kh, stride_kn, stride_kk, # stride_vz, stride_vh, stride_vk, stride_vn, # stride_oz, stride_oh, stride_om,...
TD87/triton-kernels
gemm_matmul.py
https://github.com/TD87/triton-kernels/blob/17a97ede7b6d0ca7356db68b56d0e5b6a9080ad4/gemm_matmul.py
import math import torch # type: ignore import triton # type: ignore import triton.language as tl # type: ignore @triton.jit() def matmul_kernel(x_ptr, y_ptr, out_ptr, M, N, K, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr): pid_r = tl.program_id(0) pid_c = tl.program_i...
@triton.jit () def matmul_kernel(x_ptr, y_ptr, out_ptr, M, N, K, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr): pid_r = tl.program_id(0) pid_c = tl.program_id(1) row_start = pid_r * BLOCK_M row_offsets = row_start + tl.arange(0, BLOCK_M) col_start = pid_c ...
xiaonans/triton-gemm-benchmark
kernels/basic_matmul.py
https://github.com/xiaonans/triton-gemm-benchmark/blob/436ee5a77e01ede7e4a1fe015f533dfdc53b31d3/kernels/basic_matmul.py
import triton import triton.language as tl import torch from .autotune_config import get_autotune_config # `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes: # - A list of `triton.Config` objects that define different configurations of # meta-parameters (e.g.,...
@triton.jit def matmul_kernel( # Pointers to matrices a_ptr, b_ptr, c_ptr, # Matrix dimensions M, N, K, # The stride variables represent how much to increase the ptr by when moving by 1 # element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`...
xiaonans/triton-gemm-benchmark
kernels/basic_matmul.py
https://github.com/xiaonans/triton-gemm-benchmark/blob/436ee5a77e01ede7e4a1fe015f533dfdc53b31d3/kernels/basic_matmul.py
import triton import triton.language as tl import torch from .autotune_config import get_autotune_config # `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes: # - A list of `triton.Config` objects that define different configurations of # meta-parameters (e.g.,...
@triton.jit def leaky_relu(x): return tl.where(x >= 0, x, 0.01 * x) def matmul(a, b, activation=""): # Check constraints. assert a.shape[1] == b.shape[0], "Incompatible dimensions" assert a.is_contiguous(), "Matrix A must be contiguous" M, K = a.shape K, N = b.shape # Allocates output. ...
xiaohuguo2023/scripts
others/tune_gemm1.py
https://github.com/xiaohuguo2023/scripts/blob/b6de80a590c78e78a4f8d64346c34ef445e2aa17/others/tune_gemm1.py
import argparse import sys import yaml import os import glob import subprocess import torch import triton import triton.language as tl from matmul_kernel import matmul_kernel from datetime import datetime import pandas as pd import torch.distributed as dist from torch.multiprocessing import spawn def get_full_tuning_s...
@triton.jit def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr): offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements input = tl.load(input_ptr + offsets, mask=mask) output = input tl.store(output_ptr + ...
phlippe/liger_kernels
liger_kernels/utils.py
https://github.com/phlippe/liger_kernels/blob/0abb152b752e66e1c3e0c78a7eb56daea9a07f42/liger_kernels/utils.py
import jax import numpy as np import triton import triton.language as tl @triton.jit def element_mul_kernel( _, # alias for X_ptr grad_output_ptr, X_ptr, X_stride, n_cols, BLOCK_SIZE: tl.constexpr, ): """ This function multiplies each element of the tensor pointed by X_ptr with the va...
@triton.jit def element_mul_kernel( _, # alias for X_ptr grad_output_ptr, X_ptr, X_stride, n_cols, BLOCK_SIZE: tl.constexpr, ): """ This function multiplies each element of the tensor pointed by X_ptr with the value pointed by grad_output_ptr. The multiplication is performed in-pla...
yifuwang/symm-mem-recipes
triton_utils.py
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
import triton import triton.language as tl @triton.jit def get_tid(): return tl.inline_asm_elementwise( """ mov.u32 $0, %tid.x; mov.u32 $1, %tid.y; mov.u32 $2, %tid.z; """, "=r,=r,=r", [], dtype=(tl.uint32, tl.uint32, tl.uint32), is_pure=True...
@triton.jit
def get_tid():
    """Return this thread's 3-D index within its CTA as (tid.x, tid.y, tid.z).

    Reads the PTX special registers %tid.{x,y,z} via inline assembly and
    returns them as a triple of uint32 values. Marked pure so the compiler
    is free to deduplicate repeated calls.
    """
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %tid.x;
        mov.u32 $1, %tid.y;
        mov.u32 $2, %tid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
yifuwang/symm-mem-recipes
triton_utils.py
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
import triton import triton.language as tl @triton.jit def get_tid(): return tl.inline_asm_elementwise( """ mov.u32 $0, %tid.x; mov.u32 $1, %tid.y; mov.u32 $2, %tid.z; """, "=r,=r,=r", [], dtype=(tl.uint32, tl.uint32, tl.uint32), is_pure=True...
@triton.jit
def get_ntid():
    """Return the CTA's thread-block dimensions as (ntid.x, ntid.y, ntid.z).

    Reads the PTX special registers %ntid.{x,y,z} via inline assembly and
    returns them as a triple of uint32 values. Marked pure so the compiler
    is free to deduplicate repeated calls.
    """
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %ntid.x;
        mov.u32 $1, %ntid.y;
        mov.u32 $2, %ntid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
yifuwang/symm-mem-recipes
triton_utils.py
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
import triton import triton.language as tl @triton.jit def get_tid(): return tl.inline_asm_elementwise( """ mov.u32 $0, %tid.x; mov.u32 $1, %tid.y; mov.u32 $2, %tid.z; """, "=r,=r,=r", [], dtype=(tl.uint32, tl.uint32, tl.uint32), is_pure=True...
@triton.jit
def get_flat_tid():
    """Return the linearized (flat) index of this thread within its CTA.

    Flattens the (x, y, z) thread coordinates with x as the fastest-varying
    axis: flat = (z * ntid_y + y) * ntid_x + x.
    """
    tid_x, tid_y, tid_z = get_tid()
    ntid_x, ntid_y, _ = get_ntid()
    return (tid_z * ntid_y + tid_y) * ntid_x + tid_x
yifuwang/symm-mem-recipes
triton_utils.py
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
import triton import triton.language as tl @triton.jit def get_tid(): return tl.inline_asm_elementwise( """ mov.u32 $0, %tid.x; mov.u32 $1, %tid.y; mov.u32 $2, %tid.z; """, "=r,=r,=r", [], dtype=(tl.uint32, tl.uint32, tl.uint32), is_pure=True...
@triton.jit
def get_flat_bid():
    """Return the linearized (flat) program/block index across the launch grid.

    Flattens the three program-id axes with axis 0 as the fastest-varying
    axis: flat = (pid2 * n1 + pid1) * n0 + pid0.
    """
    n0 = tl.num_programs(0)
    n1 = tl.num_programs(1)
    return (tl.program_id(2) * n1 + tl.program_id(1)) * n0 + tl.program_id(0)
yifuwang/symm-mem-recipes
triton_utils.py
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
import triton import triton.language as tl @triton.jit def get_tid(): return tl.inline_asm_elementwise( """ mov.u32 $0, %tid.x; mov.u32 $1, %tid.y; mov.u32 $2, %tid.z; """, "=r,=r,=r", [], dtype=(tl.uint32, tl.uint32, tl.uint32), is_pure=True...
@triton.jit
def sync_threads():
    """Barrier-synchronize all threads in the CTA.

    Emits PTX `bar.sync 0` via inline assembly; is_pure=False prevents the
    compiler from eliding or reordering the barrier.
    """
    tl.inline_asm_elementwise(
        "bar.sync 0;",
        "=r",
        [],
        dtype=tl.int32,
        is_pure=False,
        pack=1,
    )
Terapines/AI-Benchmark
src/triton/resize.py
https://github.com/Terapines/AI-Benchmark/blob/0ae8cd849a833d4c35a4b25b722ce98c5af2fe34/src/triton/resize.py
import torch import triton import triton.language as tl import os USE_GPU = False triton.runtime.driver.set_active_to_cpu() def get_resize_kernel_autotune_config(): configs = [ triton.Config({'BLOCK_SIZE_W': 1}), triton.Config({'BLOCK_SIZE_W': 2}), triton.Config({'BLOCK_SIZE_W': 4}), ...
@triton.jit def resize_kernel( src_ptr, out_ptr, channel, height, width, BLOCK_SIZE_W: tl.constexpr, ): pid_h = tl.program_id(axis=0) pid_c = tl.program_id(axis=1) dst_height = 2 * height # 2x upsample dst_width = 2 * width hw_fl = 7 h_idx = pid_h input_y = h_idx...
khulnasoft/divest
divest/kernels/swiglu.py
https://github.com/khulnasoft/divest/blob/53b878ed6cf9f8e172a496bf26a2b22ff3a30a51/divest/kernels/swiglu.py
import triton import triton.language as tl import torch from .utils import calculate_settings @triton.jit def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): block_idx = tl.program_id(0) offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements e_row = tl.load...
@triton.jit def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): block_idx = tl.program_id(0) offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) g_row = tl.load(g + offsets, mask = ma...
End of preview. Expand in Data Studio
README.md exists but its content is empty.
Downloads last month
3