Dataset schema, as shown in the Data Studio preview (each row is a fill-in-the-middle code-completion example):

| Column | Type | Size / range |
|---|---|---|
| prefix | string | 82 – 32.6k characters |
| middle | string | 5 – 470 characters |
| suffix | string | 0 – 81.2k characters |
| file_path | string | 6 – 168 characters |
| repo_name | string | 16 – 77 characters |
| context | list | 5 items per row |
| lang | string | 4 classes |
| ground_truth | string | 5 – 470 characters |
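Assuming the data is hosted on the Hugging Face Hub, a minimal sketch of loading and inspecting one example with the `datasets` library; the repository id and split name below are placeholders, and the nested `context` keys are inferred from the preview rows:

```python
from datasets import load_dataset

# Hypothetical repository id -- substitute the actual dataset path.
ds = load_dataset("your-org/fim-code-completion", split="train")

row = ds[0]
# Columns follow the schema above: prefix / middle / suffix plus metadata.
print(row["file_path"], row["repo_name"], row["lang"])
print("ground truth completion:", row["ground_truth"])

# Each context entry appears to carry a retrieved chunk from the same repository.
for chunk in row["context"]:
    print(chunk["filename"], len(chunk["retrieved_chunk"]))
```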
import asyncio
import websockets
import json
from sentencepiece import SentencePieceProcessor
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from lora import ExLlamaLora
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import argparse
import torch
import sys
import os
import glob
i... | sequence_actual[:, -max_stop_string:])[0] |
next_token = generator.gen_single_token()
# End on stop token
if next_token in stop_tokens:
return held_text, True, full_prompt + built_response, utilized_prompt + built_response, built_response
# Get new text
new_tail = tokenizer.decode(generator.sequence_actual[:, -(max_stop_string + ... | example_ws.py | turboderp-exllama-a544085 | [
{
"filename": "alt_generator.py",
"retrieved_chunk": " if self.remaining_tokens == 0:\n self.sequence_str += self.held_text\n return self.held_text, True\n self.remaining_tokens -= 1\n # Decode the current tail end of the sequence\n old_tail = self.tokenizer... | python | sequence_actual[:, -max_stop_string:])[0] |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import torch
import torch.nn.functional as F
import os, glob
import cuda_ext
# Directory containing model, tokenizer, generator
model_directory = "/mnt/str/models/_test_models/TheBloke... | gen_accept_token(batch_token) |
output = tokenizer.decode(generator.sequence[0])
return output
for i in range(10):
alpha = i / 5.0 - 0.4
print()
print(f"--------------------------------------")
print(f"alpha = {alpha:.1f}")
print(f"--------------------------------------")
output = generate_cfg(prompts, alpha, 200)
... | example_cfg.py | turboderp-exllama-a544085 | [
{
"filename": "generator.py",
"retrieved_chunk": " logits = self.model.forward(self.sequence[:, -1:], self.cache, lora = self.lora, input_mask = mask)\n self.apply_rep_penalty(logits)\n logits[:, :, self.tokenizer.bos_token_id] = -10000.0\n if constraints is not N... | python | gen_accept_token(batch_token) |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from flask import Flask, request
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import os, glob
# Directory containing config.json, tokenizer.model and safetensors file for the model
model_directory = "/mnt/str/models/llama-7b-4bit/"... | settings.token_repetition_penalty_max = 1.176 |
generator.settings.token_repetition_penalty_sustain = config.max_seq_len
generator.settings.temperature = 0.7
generator.settings.top_p = 0.1
generator.settings.top_k = 40
generator.settings.typical = 0.0 # Disabled
outputs = generator.generate_simple(prompt, max_new_tokens = 200)
return... | example_flask.py | turboderp-exllama-a544085 | [
{
"filename": "webui/app.py",
"retrieved_chunk": "def api_delete_session():\n global session\n data = request.get_json()\n session.api_delete_session(data)\n return json.dumps({\"result\": \"ok\"}) + \"\\n\"\n# Set fixed prompt settings\n@app.route(\"/api/set_fixed_prompt\", methods=['POST'])\nd... | python | settings.token_repetition_penalty_max = 1.176 |
import asyncio
import websockets
import json
from sentencepiece import SentencePieceProcessor
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from lora import ExLlamaLora
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import argparse
import torch
import sys
import os
import glob
i... | decode(prompt_ids)[0] |
built_response = ""
remaining_tokens = max_new_tokens
# Settings
stop_strings = []
stop_tokens = []
for t in stop_conditions:
if isinstance(t, int): stop_tokens += [t]
if isinstance(t, str): stop_strings += [t]
held_text = ""
max_stop_string = 2
for ss in stop_s... | example_ws.py | turboderp-exllama-a544085 | [
{
"filename": "alt_generator.py",
"retrieved_chunk": " # stop_conditions: List of strings or integer token IDs that will end the sequence\n # settings: ExLlamaAltGeneratorSettings\n # encode_special_characters: Set to true to tokenize \"</s>\" etc.\n def begin_stream(self, prompt: str, stop_cond... | python | decode(prompt_ids)[0] |
import asyncio
import websockets
import json
from sentencepiece import SentencePieceProcessor
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from lora import ExLlamaLora
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import argparse
import torch
import sys
import os
import glob
i... | gen_begin_reuse(input_ids) |
def stream():
global model, cache, config, generator, tokenizer
global stop_strings, stop_tokens, prompt_ids, held_text, max_stop_string, remaining_tokens
global full_prompt, utilized_prompt, built_response
# Check total response length
if remaining_tokens == 0:
return held_text, True, f... | example_ws.py | turboderp-exllama-a544085 | [
{
"filename": "alt_generator.py",
"retrieved_chunk": " self.sequence_str = self.tokenizer.decode(applied_input_ids)[0] if applied_input_ids.shape[0] < input_ids.shape[0] else prompt\n # Settings\n self.stop_strings = []\n self.stop_tokens = []\n for t in stop_conditions:\n... | python | gen_begin_reuse(input_ids) |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import torch
import torch.nn.functional as F
import os, glob
import cuda_ext
# Directory containing model, tokenizer, generator
model_directory = "/mnt/str/models/_test_models/TheBloke... | encode(prompts, return_mask = True) |
generator.gen_begin(ids, mask = mask)
# Sampling loop
for _ in range(max_new_tokens):
logits = model.forward(generator.sequence[:, -1:], cache, input_mask = mask)
generator.apply_rep_penalty(logits)
logits = F.log_softmax(logits, dim = -1)
logits_mixed = (1 - alpha) * lo... | example_cfg.py | turboderp-exllama-a544085 | [
{
"filename": "generator.py",
"retrieved_chunk": " self.sequence = self.sequence[:, num_tokens:]\n self.gen_begin(self.sequence, mask = mask)\n def gen_num_tokens(self):\n return self.sequence_actual.shape[-1]\n # Simple generator function\n def generate_simple(self, pr... | python | encode(prompts, return_mask = True) |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import torch
import torch.nn.functional as F
import os, glob
import cuda_ext
# Directory containing model, tokenizer, generator
model_directory = "/mnt/str/models/_test_models/TheBloke... | decode(generator.sequence[0]) |
return output
for i in range(10):
alpha = i / 5.0 - 0.4
print()
print(f"--------------------------------------")
print(f"alpha = {alpha:.1f}")
print(f"--------------------------------------")
output = generate_cfg(prompts, alpha, 200)
print(output[len(prompts[0]):].strip())
| example_cfg.py | turboderp-exllama-a544085 | [
{
"filename": "generator.py",
"retrieved_chunk": " logits = self.model.forward(self.sequence[:, -1:], self.cache, lora = self.lora, input_mask = mask)\n self.apply_rep_penalty(logits)\n logits[:, :, self.tokenizer.bos_token_id] = -10000.0\n if constraints is not N... | python | decode(generator.sequence[0]) |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
import argparse, sys, os, glob
from torch import version as torch_version
from globals import set_affinity_str
def add_args(parser):
parser.add_argument("-t", "--tokenizer", type = str, help = "Tokenizer model path")
... | calculate_rotary_embedding_base() |
if args.flash_attn:
config.use_flash_attn_2 = True
try:
config.max_input_len = int(args.flash_attn)
except ValueError:
pass
config.matmul_recons_thd = args.matmul_recons_thd
config.fused_mlp_thd = args.fused_mlp_thd
config.sdp_thd = args.sdp_thd
con... | model_init.py | turboderp-exllama-a544085 | [
{
"filename": "example_alt_generator.py",
"retrieved_chunk": " args.lora = os.path.join(args.lora_dir, \"adapter_model.bin\")\n # Model globals\n model_init.set_globals(args)\n # Instantiate model and generator\n config = model_init.make_config(args)\n model = ExLlama(config)\n cach... | python | calculate_rotary_embedding_base() |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import os, glob
# Directory containing model, tokenizer, generator
model_directory = "/mnt/str/models/llama-13b-4bit-128g/"
# Locate files we need within that directory
tokenizer_pat... | generate_simple(prompts, max_new_tokens = 200) |
for line in output:
print("---")
print(line)
| example_batch.py | turboderp-exllama-a544085 | [
{
"filename": "example_basic.py",
"retrieved_chunk": "generator.settings.token_repetition_penalty_max = 1.2\ngenerator.settings.temperature = 0.95\ngenerator.settings.top_p = 0.65\ngenerator.settings.top_k = 100\ngenerator.settings.typical = 0.5\n# Produce a simple generation\nprompt = \"Once upon a time,\"... | python | generate_simple(prompts, max_new_tokens = 200) |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
import argparse, sys, os, glob
from torch import version as torch_version
from globals import set_affinity_str
def add_args(parser):
parser.add_argument("-t", "--tokenizer", type = str, help = "Tokenizer model path")
... | set_auto_map(args.gpu_split) |
config.gpu_peer_fix = args.gpu_peer_fix
config.alpha_value = args.alpha
config.calculate_rotary_embedding_base()
if args.flash_attn:
config.use_flash_attn_2 = True
try:
config.max_input_len = int(args.flash_attn)
except ValueError:
pass
config.matmu... | model_init.py | turboderp-exllama-a544085 | [
{
"filename": "example_chatbot.py",
"retrieved_chunk": "print(f\" -- Sequence length: {args.length}\")\nprint(f\" -- Temperature: {args.temperature:.2f}\")\nprint(f\" -- Top-K: {args.top_k}\")\nprint(f\" -- Top-P: {args.top_p:.2f}\")\nprint(f\" -- Min-P: {args.min_p:.2f}\")\nprint(f\" -- Repetition penalty:... | python | set_auto_map(args.gpu_split) |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import torch
import torch.nn.functional as F
import os, glob
import cuda_ext
# Directory containing model, tokenizer, generator
model_directory = "/mnt/str/models/_test_models/TheBloke... | forward(generator.sequence[:, -1:], cache, input_mask = mask) |
generator.apply_rep_penalty(logits)
logits = F.log_softmax(logits, dim = -1)
logits_mixed = (1 - alpha) * logits[0] + alpha * logits[1]
sampled_token, _ = generator.sample_current(logits_mixed)
if sampled_token.item() == tokenizer.eos_token_id: break
batch_token = sam... | example_cfg.py | turboderp-exllama-a544085 | [
{
"filename": "generator.py",
"retrieved_chunk": " self.sequence = self.sequence[:, num_tokens:]\n self.gen_begin(self.sequence, mask = mask)\n def gen_num_tokens(self):\n return self.sequence_actual.shape[-1]\n # Simple generator function\n def generate_simple(self, pr... | python | forward(generator.sequence[:, -1:], cache, input_mask = mask) |
from __future__ import annotations
import pytest
from configzen.errors import ConfigSyntaxError
from configzen.model import ConfigRoute
STRING_DECOMPOSITION_PARAMS = [
("a.b.c", ["a", "b", "c"]),
(r"a\.b.c", ["a.b", "c"]),
("a.b.[c.d]", ["a", "b", "c.d"]),
("[a.b].c.[d.e]", ["a.b", "c", "d.e"]),
... | enter("b") == ConfigRoute("a.b") |
assert ConfigRoute("a").enter(["b", "c"]) == ConfigRoute("a.b.c")
assert ConfigRoute("a").enter(ConfigRoute("b.c")) == ConfigRoute("a.b.c")
assert ConfigRoute("a").enter(ConfigRoute(["b", "c"])) == ConfigRoute("a.b.c")
assert ConfigRoute("a").enter(ConfigRoute("b.[c.d]")) == ConfigRoute("a.b.[c.d]")
... | tests/test_config/test_route.py | bswck-configzen-42ed40f | [
{
"filename": "tests/test_module_wrapping/test_wrapping.py",
"retrieved_chunk": " assert model == MyConfig()\n assert wrapper.a == MyConfig().a\n assert wrapper.b == MyConfig().b\n wrapper.a = \"2137\"\n wrapper.b = \"1337\"\n assert wrapper.a == model.a == 2137\n assert wrapper.b == mo... | python | enter("b") == ConfigRoute("a.b") |
import argparse
import logging
from logging.config import fileConfig
from pathlib import Path
from . import compile, decompile
def parse_args() -> argparse.Namespace:
# create the top-level parser
parser = argparse.ArgumentParser(
description="Decompile|Compile Python source files into bytecode."
... | compile(to_compile=to_compile) |
elif args.command == "decompile":
to_decompile = Path(args.path)
output_path = Path(args.output) if args.output else None
decompile.decompile(to_decompile=to_decompile, output_path=output_path)
def main() -> None:
cli()
if __name__ == "__main__":
main()
| src/pychd/main.py | diohabara-pychd-b1d0a38 | [
{
"filename": "src/pychd/compile.py",
"retrieved_chunk": " parser.add_argument(\"directory\", help=\"Directory to compile\", type=str)\n return parser.parse_args()\ndef compile(to_compile: Path) -> None:\n if to_compile.is_dir():\n logging.info(\"Compiling Python source files...\")\n ... | python | compile(to_compile=to_compile) |
from __future__ import annotations
import contextlib
import functools
from collections.abc import Callable, Coroutine, Iterator
from typing import TYPE_CHECKING, Any, cast, overload
from configzen.model import export_hook, export_model, export_model_async, field_hook
if TYPE_CHECKING:
from configzen.typedefs imp... | dispatch(cls) is export_model_async: |
async def default_async_func(obj: Any, **kwargs: Any) -> Any:
kwargs |= predefined_kwargs
return await obj.export_async(**kwargs)
export_model_async.register(cls, default_async_func)
else:
export_model.register(cls, func)
if export_model_asy... | configzen/decorators.py | bswck-configzen-42ed40f | [
{
"filename": "configzen/model.py",
"retrieved_chunk": " try:\n cast_func = field_hook_registrars.dispatch(cls)\n except KeyError:\n return value\n return cast_func(cls, value)\n field_hook.register = field_hook_registrars.register\n@functools.singledispatch\nde... | python | dispatch(cls) is export_model_async: |
import asyncio
import websockets
import json
from sentencepiece import SentencePieceProcessor
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from lora import ExLlamaLora
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import argparse
import torch
import sys
import os
import glob
i... | set_auto_map('17.615,18.8897') |
config.model_path = model_path # supply path to model weights file
model = ExLlama(config) # create ExLlama instance and load the weights
print(f"Model loaded: {model_path}")
tokenizer = ExLlamaTokenizer(tokenizer_path) # create tokenizer from token... | example_ws.py | turboderp-exllama-a544085 | [
{
"filename": "example_alt_generator.py",
"retrieved_chunk": " # Directory containing model, tokenizer\n model_directory = \"/mnt/str/models/llama-7b-4bit-128g/\"\n # Locate files we need within that directory\n tokenizer_path = os.path.join(model_directory, \"tokenizer.model\")\n model_confi... | python | set_auto_map('17.615,18.8897') |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import torch
import torch.nn.functional as F
import os, glob
import cuda_ext
# Directory containing model, tokenizer, generator
model_directory = "/mnt/str/models/_test_models/TheBloke... | sample_current(logits_mixed) |
if sampled_token.item() == tokenizer.eos_token_id: break
batch_token = sampled_token.repeat(2, 1)
generator.gen_accept_token(batch_token)
output = tokenizer.decode(generator.sequence[0])
return output
for i in range(10):
alpha = i / 5.0 - 0.4
print()
print(f"------------... | example_cfg.py | turboderp-exllama-a544085 | [
{
"filename": "test_benchmark_inference.py",
"retrieved_chunk": " ids = tokenizer.encode(prompts)\n assert ids.shape[1] < model.config.max_seq_len, f\"Max length {ids.shape[1]} exceeds model limit {model.config.max_seq_len}\"\n mask = ids.ne(tokenizer.pad_token_id)\n # Batched ge... | python | sample_current(logits_mixed) |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import torch
import torch.nn.functional as F
import os, glob
import cuda_ext
# Directory containing model, tokenizer, generator
model_directory = "/mnt/str/models/_test_models/TheBloke... | sequence[:, -1:], cache, input_mask = mask) |
generator.apply_rep_penalty(logits)
logits = F.log_softmax(logits, dim = -1)
logits_mixed = (1 - alpha) * logits[0] + alpha * logits[1]
sampled_token, _ = generator.sample_current(logits_mixed)
if sampled_token.item() == tokenizer.eos_token_id: break
batch_token = sam... | example_cfg.py | turboderp-exllama-a544085 | [
{
"filename": "generator.py",
"retrieved_chunk": " self.sequence = self.sequence[:, num_tokens:]\n self.gen_begin(self.sequence, mask = mask)\n def gen_num_tokens(self):\n return self.sequence_actual.shape[-1]\n # Simple generator function\n def generate_simple(self, pr... | python | sequence[:, -1:], cache, input_mask = mask) |
from datetime import datetime
from typing import Dict
import time
import torch
import torch.nn as nn
from torch.nn.parallel.distributed import DistributedDataParallel
import json
import os
from collections import OrderedDict
def save_checkpoint(prefix: str,
net_model, net_optimizer,
... | dump(opt, f, indent="\t") |
return opt
def dprint(*args, local_rank: int = 0, **kwargs) -> None:
if local_rank == 0:
print(*args, **kwargs)
def time_log() -> str:
a = datetime.now()
return f"*" * 48 + f" {a.year:>4}/{a.month:>2}/{a.day:>2} | {a.hour:>2}:{a.minute:>2}:{a.second:>2}\n"
@torch.no_grad()
def compute_p... | utils/common_utils.py | hynnsk-HP-cd48934 | [
{
"filename": "utils/wandb_utils.py",
"retrieved_chunk": "from typing import Dict, Optional\nimport os\nimport wandb\n__all__ = [\"set_wandb\"]\ndef set_wandb(opt: Dict, local_rank: int = 0, force_mode: Optional[str] = None) -> str:\n if local_rank != 0:\n return \"\"\n # opt = opt\n save_di... | python | dump(opt, f, indent="\t") |
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from lora import ExLlamaLora
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
import argparse
import torch
import sys
import os
import glob
import model_init
# Simple interactive chatbot script
torch.set_grad_enabled(False)
torch.cuda... | gen_begin(ids) |
next_userprompt = username + ": "
first_round = True
while True:
res_line = bot_name + ":"
res_tokens = tokenizer.encode(res_line)
num_res_tokens = res_tokens.shape[-1] # Decode from here
if first_round and args.botfirst: in_tokens = res_tokens
else:
# Read and format input
... | example_chatbot.py | turboderp-exllama-a544085 | [
{
"filename": "webui/session.py",
"retrieved_chunk": " held_text = \"\"\n for i in range(self.max_response_tokens):\n # Truncate the past if the next chunk might generate past max_seq_length\n if generator.sequence_actual is not None:\n if generator.sequenc... | python | gen_begin(ids) |
from __future__ import annotations
import os
from appsignal.__about__ import __version__
from appsignal.config import Config, Options
def test_option():
config = Config(Options(active=False, enable_host_metrics=True))
assert config.option("active") is False
assert config.option("enable_host_metrics") i... | update(config.sources["default"]) |
final_options.update(config.sources["system"])
final_options.update(env_options)
assert config.options == final_options
def test_environ_source_bool_is_unset():
config = Config()
assert config.sources["environment"].get("active") is None
assert config.option("active") is None
def test_envi... | tests/test_config.py | appsignal-appsignal-python-5a0cfa9 | [
{
"filename": "src/appsignal/config.py",
"retrieved_chunk": " self.sources = Sources(\n default=self.DEFAULT_CONFIG,\n system=Config.load_from_system(),\n initial=options or Options(),\n environment=Config.load_from_environment(),\n )\n final_... | python | update(config.sources["default"]) |
from datetime import datetime
from typing import Dict
import time
import torch
import torch.nn as nn
from torch.nn.parallel.distributed import DistributedDataParallel
import json
import os
from collections import OrderedDict
def save_checkpoint(prefix: str,
net_model, net_optimizer,
... | load(f, object_pairs_hook=OrderedDict) # noqa |
gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
opt['num_gpus'] = len(opt['gpu_ids'])
print('export CUDA_VISIBLE_DEVICES=' + gpu_list)
print('number of GPUs=' + str(opt['num_gpus']))
os.mak... | utils/common_utils.py | hynnsk-HP-cd48934 | [
{
"filename": "model/dino/utils.py",
"retrieved_chunk": " if os.path.isfile(pretrained_weights):\n state_dict = torch.load(pretrained_weights, map_location=\"cpu\")\n if checkpoint_key is not None and checkpoint_key in state_dict:\n print(f\"Take key {checkpoint_key} in provided ... | python | load(f, object_pairs_hook=OrderedDict) # noqa |
from __future__ import annotations
import os
import re
from logging import DEBUG, ERROR, INFO, WARNING
from appsignal.agent import agent
from appsignal.client import Client
def test_client_options_merge_sources():
os.environ["APPSIGNAL_PUSH_API_KEY"] = "some_key"
client = Client(name="MyApp")
assert cli... | active is False |
def test_client_agent_active():
client = Client(active=True, name="MyApp", push_api_key="000")
assert client._config.options["active"] is True
client.start()
assert os.environ.get("_APPSIGNAL_ACTIVE") == "true"
assert agent.active is True
def test_client_active():
client = Client(
... | tests/test_client.py | appsignal-appsignal-python-5a0cfa9 | [
{
"filename": "src/appsignal/cli/demo.py",
"retrieved_chunk": " active=True,\n name=self._name,\n push_api_key=self._push_api_key,\n log_level=\"trace\",\n )\n print(\"Sending example data to AppSignal...\")\n print(f\"Starting AppSignal clien... | python | active is False |