Datasets:

Modalities:
Text
ArXiv:
Maple222 commited on
Commit
750766e
·
verified ·
1 Parent(s): bb83c3a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. .github/workflows/mkdocs-deploy.yml +32 -0
  3. .github/workflows/publish-pkg.yml +41 -0
  4. config_hub/eval/arxivcl.yaml +117 -0
  5. config_hub/eval/openllama_cl_ppl.yaml +123 -0
  6. config_hub/eval/qwen2_cl_ppl.yaml +123 -0
  7. config_hub/eval/tinyllama_cl.yaml +117 -0
  8. config_hub/eval/tinyllama_cl_ppl.yaml +123 -0
  9. extensions/thunder/strategies/__init__.py +2 -0
  10. extensions/thunder/strategies/thunder_ddp.py +258 -0
  11. extensions/thunder/strategies/thunder_fsdp.py +459 -0
  12. extensions/thunder/unsloth/__init__.py +0 -0
  13. extensions/thunder/unsloth/executor.py +284 -0
  14. extensions/thunder/unsloth/kernels/__init__.py +4 -0
  15. extensions/thunder/unsloth/kernels/cross_entropy_loss.py +285 -0
  16. extensions/thunder/unsloth/kernels/rope_embedding.py +154 -0
  17. extensions/thunder/unsloth/kernels/swiglu.py +134 -0
  18. extensions/thunder/unsloth/kernels/utils.py +41 -0
  19. extensions/xla/finetune/__init__ +0 -0
  20. extensions/xla/finetune/adapter.py +285 -0
  21. extensions/xla/generate/__init__ +0 -0
  22. extensions/xla/generate/adapter.py +133 -0
  23. extensions/xla/generate/base.py +185 -0
  24. extensions/xla/scripts/__init__ +0 -0
  25. extensions/xla/scripts/prepare_alpaca.py +147 -0
  26. out/eval/openllama_arc_arxiv_mc/arxiv_mc_heatmap.png +3 -0
  27. out/eval/openllama_arc_arxiv_mc/arxiv_mc_heatmap_acc.png +3 -0
  28. out/eval/openllama_arxiv_mc/arxiv_mc_heatmap.png +3 -0
  29. out/eval/openllama_arxiv_mc/arxiv_mc_heatmap_acc.png +3 -0
  30. out/eval/openllama_benches/monthly_metrics.png +3 -0
  31. out/eval/openllama_ppl/val_ppl_heatmap.png +3 -0
  32. out/eval/qwen2_7b_question_focus/acc_heatmap.png +3 -0
  33. out/eval/qwen2_7b_question_focus/acc_norm_heatmap.png +3 -0
  34. out/eval/qwen2_7b_question_focus_lr_plus/acc_heatmap.png +3 -0
  35. out/eval/qwen2_7b_question_focus_lr_plus/acc_norm_heatmap.png +3 -0
  36. out/eval/qwen2_7b_question_focus_lr_plus/heatmap.png +3 -0
  37. out/eval/qwen2_7b_question_focus_lr_plus/positive_stability_curve.png +3 -0
  38. out/eval/qwen2_7b_question_focus_lr_plus/stability_curve.png +3 -0
  39. out/eval/qwen2_7b_question_focus_lr_plus/summary_heatmap.png +3 -0
  40. out/eval/qwen2_arxiv_mc/arxiv_mc_heatmap.png +3 -0
  41. out/eval/qwen2_arxiv_mc/arxiv_mc_heatmap_acc.png +3 -0
  42. out/eval/qwen2_ppl/val_ppl_heatmap.png +3 -0
  43. out/eval/tinyllama_3_epoch_arxiv_mc/arxiv_mc_heatmap.png +3 -0
  44. out/eval/tinyllama_3_epoch_arxiv_mc/arxiv_mc_heatmap_acc.png +3 -0
  45. out/eval/tinyllama_arxiv_mc/arxiv_mc_heatmap.png +3 -0
  46. out/eval/tinyllama_arxiv_mc/arxiv_mc_heatmap_acc.png +3 -0
  47. out/eval/tinyllama_benches/2407_full/results.json +3 -0
  48. out/eval/tinyllama_benches/2407_full/tokenizer.model +3 -0
  49. out/eval/tinyllama_benches/2408_full/results.json +3 -0
  50. out/eval/tinyllama_benches/monthly_metrics.png +3 -0
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ out/eval/tinyllama_benches/2407_full/results.json filter=lfs diff=lfs merge=lfs -text
61
+ out/eval/tinyllama_benches/2408_full/results.json filter=lfs diff=lfs merge=lfs -text
.github/workflows/mkdocs-deploy.yml ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Deploy MkDocs
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+
7
+ permissions:
8
+ contents: write
9
+
10
+ jobs:
11
+ deploy:
12
+ runs-on: ubuntu-24.04
13
+ steps:
14
+ # Step 1: Checkout the repository
15
+ - uses: actions/checkout@v4
16
+
17
+ # Step 2: Set up Python
18
+ - uses: actions/setup-python@v5
19
+ with:
20
+ python-version: "3.x"
21
+ cache: "pip"
22
+
23
+ # Step 3: Install MkDocs and dependencies
24
+ - run: pip install mkdocs mkdocs-material mkdocs-pagetree-plugin
25
+ # Step 4: Deploy to GitHub Pages
26
+ - run: |
27
+ mkdir -p gh-pages/docs
28
+ cp -r tutorials/* gh-pages/docs
29
+ cd gh-pages
30
+ mv docs/mkdocs.yml mkdocs.yml
31
+ echo "{{ pagetree }}" > docs/index.md
32
+ mkdocs gh-deploy --force
.github/workflows/publish-pkg.yml ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # To create a release, create a tag and push it to GitHub:
2
+ #git tag -a "v0.0.1-beta" -m "beta version testing"
3
+ #git push --tags
4
+ # https://dev.to/iamtekson/publish-package-to-pypi-and-release-new-version-using-github-actions-108k
5
+ name: Publish LitGPT to PyPI
6
+
7
+ on:
8
+ push:
9
+ tags:
10
+ - "v*"
11
+ jobs:
12
+ build-n-publish:
13
+ name: Build and publish to PyPI
14
+ runs-on: ubuntu-latest
15
+ environment:
16
+ name: pypi
17
+ url: https://pypi.org/p/litgpt
18
+ permissions:
19
+ id-token: write
20
+
21
+ steps:
22
+ - name: Checkout source
23
+ uses: actions/checkout@v4
24
+
25
+ - name: Set up Python
26
+ uses: actions/setup-python@v5
27
+ with:
28
+ python-version: "3.x"
29
+ cache: "pip"
30
+
31
+ - name: Build source and wheel distributions
32
+ run: |
33
+ python -m pip install --upgrade build twine
34
+ pip install importlib_metadata==7.2.1
35
+ python -m build
36
+ twine check --strict dist/*
37
+ - name: Publish distribution to PyPI
38
+ uses: pypa/gh-action-pypi-publish@release/v1
39
+ with:
40
+ user: __token__
41
+ password: ${{ secrets.PYPI_API_TOKEN }}
config_hub/eval/arxivcl.yaml ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
2
+ # ``model_config``. (type: Optional[str], default: null)
3
+ model_name: tiny-llama-1.1b
4
+
5
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
6
+ # ``model_config``. (type: Optional[Config], default: null)
7
+ model_config:
8
+
9
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
10
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
11
+ out_dir: out/pretrain/2407
12
+
13
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
14
+ precision: bf16-mixed
15
+
16
+ # Optional path to a checkpoint directory to initialize the model from.
17
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
18
+ initial_checkpoint_dir: checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
19
+
20
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
21
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
22
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
23
+ # (type: Union[bool, Literal["auto"], Path], default: False)
24
+ resume: false
25
+
26
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
27
+ data: Arxiv
28
+
29
+ # Data-Dir
30
+ data_dir:
31
+
32
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
33
+ train:
34
+ # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
35
+ save_interval: 100
36
+
37
+ # Number of iterations between logging calls (type: int, default: 1)
38
+ log_interval: 1
39
+
40
+ # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
41
+ global_batch_size: 512
42
+
43
+ # Number of samples per data-parallel rank (type: int, default: 4)
44
+ micro_batch_size: 4
45
+
46
+ # Number of iterations with learning rate warmup active (type: int, default: 2000)
47
+ lr_warmup_steps: 20
48
+
49
+ # Number of epochs to train on (type: Optional[int], default: null)
50
+ epochs:
51
+
52
+ # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
53
+ max_tokens: 209715200
54
+
55
+ # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
56
+ max_steps:
57
+
58
+ # Limits the length of samples. Off by default (type: Optional[int], default: null)
59
+ max_seq_length: 2048
60
+
61
+ # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
62
+ tie_embeddings:
63
+
64
+ # (type: Optional[float], default: 1.0)
65
+ max_norm: 1.0
66
+
67
+ # (type: float, default: 4e-05)
68
+ min_lr: 4.0e-05
69
+
70
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
71
+ eval:
72
+ # Number of optimizer steps between evaluation calls (type: int, default: 1000)
73
+ interval: 50
74
+
75
+ # Number of tokens to generate (type: Optional[int], default: null)
76
+ max_new_tokens:
77
+
78
+ # Number of iterations (type: int, default: 100)
79
+ max_iters: 200
80
+
81
+ # Whether to evaluate on the validation set at the beginning of the training
82
+ initial_validation: false
83
+
84
+ # Whether to evaluate on the validation set at the end the training
85
+ final_validation: true
86
+
87
+ # Optimizer-related arguments
88
+ optimizer:
89
+ class_path: torch.optim.AdamW
90
+
91
+ init_args:
92
+ # (type: float, default: 0.001)
93
+ lr: 4e-4
94
+
95
+ # (type: float, default: 0.01)
96
+ weight_decay: 0.1
97
+
98
+ # (type: tuple, default: (0.9,0.999))
99
+ betas:
100
+ - 0.9
101
+ - 0.95
102
+
103
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
104
+ devices: auto
105
+
106
+ # How many nodes to use. (type: int, default: 1)
107
+ num_nodes: 1
108
+
109
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
110
+ # module require this. (type: Optional[Path], default: null)
111
+ tokenizer_dir: checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
112
+
113
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
114
+ logger_name: tensorboard
115
+
116
+ # The random seed to use for reproducibility. (type: int, default: 42)
117
+ seed: 42
config_hub/eval/openllama_cl_ppl.yaml ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
2
+ # ``model_config``. (type: Optional[str], default: null)
3
+ model_name: open_llama_3b
4
+
5
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
6
+ # ``model_config``. (type: Optional[Config], default: null)
7
+ model_config:
8
+
9
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
10
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
11
+ out_dir: out/pretrain/2407
12
+
13
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
14
+ precision: bf16-mixed
15
+
16
+ # Optional path to a checkpoint directory to initialize the model from.
17
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
18
+ initial_checkpoint_dir: checkpoints/openlm-research/open_llama_3b
19
+
20
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
21
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
22
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
23
+ # (type: Union[bool, Literal["auto"], Path], default: False)
24
+ resume: false
25
+
26
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
27
+ data:
28
+ class_path: litgpt.data.Arxiv
29
+ init_args:
30
+ ppl: true
31
+ data_path:
32
+
33
+ # Data-Dir
34
+ data_dir: litgpt/data/arxiv_openllama_tokenized
35
+
36
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
37
+ train:
38
+ # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
39
+ save_interval: 100
40
+
41
+ # Number of iterations between logging calls (type: int, default: 1)
42
+ log_interval: 1
43
+
44
+ # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
45
+ global_batch_size: 512
46
+
47
+ # Number of samples per data-parallel rank (type: int, default: 4)
48
+ micro_batch_size: 4
49
+
50
+ # Number of iterations with learning rate warmup active (type: int, default: 2000)
51
+ lr_warmup_steps: 20
52
+
53
+ # Number of epochs to train on (type: Optional[int], default: null)
54
+ epochs:
55
+
56
+ # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
57
+ max_tokens: 209715200
58
+
59
+ # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
60
+ max_steps:
61
+
62
+ # Limits the length of samples. Off by default (type: Optional[int], default: null)
63
+ max_seq_length: 2048
64
+
65
+ # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
66
+ tie_embeddings:
67
+
68
+ # (type: Optional[float], default: 1.0)
69
+ max_norm: 1.0
70
+
71
+ # (type: float, default: 4e-05)
72
+ min_lr: 4.0e-05
73
+
74
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
75
+ eval:
76
+ # Number of optimizer steps between evaluation calls (type: int, default: 1000)
77
+ interval: 50
78
+
79
+ # Number of tokens to generate (type: Optional[int], default: null)
80
+ max_new_tokens:
81
+
82
+ # Number of iterations (type: int, default: 100)
83
+ max_iters: 200
84
+
85
+ # Whether to evaluate on the validation set at the beginning of the training
86
+ initial_validation: false
87
+
88
+ # Whether to evaluate on the validation set at the end the training
89
+ final_validation: true
90
+
91
+ # Optimizer-related arguments
92
+ optimizer:
93
+ class_path: torch.optim.AdamW
94
+
95
+ init_args:
96
+ # (type: float, default: 0.001)
97
+ lr: 4e-4
98
+
99
+ # (type: float, default: 0.01)
100
+ weight_decay: 0.1
101
+
102
+ # (type: tuple, default: (0.9,0.999))
103
+ betas:
104
+ - 0.9
105
+ - 0.95
106
+
107
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
108
+ devices: auto
109
+
110
+ # How many nodes to use. (type: int, default: 1)
111
+ num_nodes: 1
112
+
113
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
114
+ # module require this. (type: Optional[Path], default: null)
115
+ tokenizer_dir: checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
116
+
117
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
118
+ logger_name: tensorboard
119
+
120
+ # The random seed to use for reproducibility. (type: int, default: 42)
121
+ seed: 42
122
+
123
+ multi_month: true
config_hub/eval/qwen2_cl_ppl.yaml ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
2
+ # ``model_config``. (type: Optional[str], default: null)
3
+ model_name: Qwen2-7B
4
+
5
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
6
+ # ``model_config``. (type: Optional[Config], default: null)
7
+ model_config:
8
+
9
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
10
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
11
+ out_dir:
12
+
13
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
14
+ precision: bf16-true
15
+
16
+ # Optional path to a checkpoint directory to initialize the model from.
17
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
18
+ initial_checkpoint_dir: checkpoints/Qwen/Qwen2-7B
19
+
20
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
21
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
22
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
23
+ # (type: Union[bool, Literal["auto"], Path], default: False)
24
+ resume: false
25
+
26
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
27
+ data:
28
+ class_path: litgpt.data.Arxiv
29
+ init_args:
30
+ ppl: true
31
+ data_path:
32
+
33
+ data_dir: litgpt/data/arxiv_qwen2_tokenized
34
+
35
+
36
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
37
+ train:
38
+ # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
39
+ save_interval: 9999
40
+
41
+ # Number of iterations between logging calls (type: int, default: 1)
42
+ log_interval: 1
43
+
44
+ # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
45
+ global_batch_size: 128
46
+
47
+ # Number of samples per data-parallel rank (type: int, default: 4)
48
+ micro_batch_size: 1
49
+
50
+ # Number of iterations with learning rate warmup active (type: int, default: 2000)
51
+ lr_warmup_steps: 0
52
+
53
+ # Number of epochs to train on (type: Optional[int], default: null)
54
+ epochs:
55
+
56
+ # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
57
+ max_tokens: 3000000000000
58
+
59
+ # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
60
+ max_steps:
61
+
62
+ # Limits the length of samples. Off by default (type: Optional[int], default: null)
63
+ max_seq_length: 8192
64
+
65
+ # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
66
+ tie_embeddings:
67
+
68
+ # (type: Optional[float], default: 1.0)
69
+ max_norm: 1.0
70
+
71
+ # (type: float, default: 4e-05)
72
+ min_lr: 2.0e-05
73
+
74
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
75
+ eval:
76
+ # Number of optimizer steps between evaluation calls (type: int, default: 1000)
77
+ interval: 1000
78
+
79
+ # Number of tokens to generate (type: Optional[int], default: null)
80
+ max_new_tokens:
81
+
82
+ # Number of iterations (type: int, default: 100)
83
+ max_iters: 200
84
+
85
+ # Whether to evaluate on the validation set at the beginning of the training
86
+ initial_validation: false
87
+
88
+ # Whether to evaluate on the validation set at the end the training
89
+ final_validation: true
90
+
91
+ # Optimizer-related arguments
92
+ optimizer:
93
+ class_path: torch.optim.AdamW
94
+
95
+ init_args:
96
+ # (type: float, default: 0.001)
97
+ lr: 2.0e-05
98
+
99
+ # (type: float, default: 0.01)
100
+ weight_decay: 0.1
101
+
102
+ # (type: tuple, default: (0.9,0.999))
103
+ betas:
104
+ - 0.9
105
+ - 0.95
106
+
107
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
108
+ devices: auto
109
+
110
+ # How many nodes to use. (type: int, default: 1)
111
+ num_nodes: 1
112
+
113
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
114
+ # module require this. (type: Optional[Path], default: null)
115
+ tokenizer_dir: checkpoints/Qwen/Qwen2-7B
116
+
117
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
118
+ logger_name: tensorboard
119
+
120
+ # The random seed to use for reproducibility. (type: int, default: 42)
121
+ seed: 42
122
+
123
+ multi_month: true
config_hub/eval/tinyllama_cl.yaml ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
2
+ # ``model_config``. (type: Optional[str], default: null)
3
+ model_name: tiny-llama-1.1b
4
+
5
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
6
+ # ``model_config``. (type: Optional[Config], default: null)
7
+ model_config:
8
+
9
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
10
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
11
+ out_dir: out/pretrain/2407
12
+
13
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
14
+ precision: bf16-mixed
15
+
16
+ # Optional path to a checkpoint directory to initialize the model from.
17
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
18
+ initial_checkpoint_dir: checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
19
+
20
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
21
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
22
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
23
+ # (type: Union[bool, Literal["auto"], Path], default: False)
24
+ resume: false
25
+
26
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
27
+ data: Arxiv
28
+
29
+ # Data-Dir
30
+ data_dir:
31
+
32
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
33
+ train:
34
+ # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
35
+ save_interval: 100
36
+
37
+ # Number of iterations between logging calls (type: int, default: 1)
38
+ log_interval: 1
39
+
40
+ # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
41
+ global_batch_size: 512
42
+
43
+ # Number of samples per data-parallel rank (type: int, default: 4)
44
+ micro_batch_size: 4
45
+
46
+ # Number of iterations with learning rate warmup active (type: int, default: 2000)
47
+ lr_warmup_steps: 20
48
+
49
+ # Number of epochs to train on (type: Optional[int], default: null)
50
+ epochs:
51
+
52
+ # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
53
+ max_tokens: 209715200
54
+
55
+ # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
56
+ max_steps:
57
+
58
+ # Limits the length of samples. Off by default (type: Optional[int], default: null)
59
+ max_seq_length: 2048
60
+
61
+ # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
62
+ tie_embeddings:
63
+
64
+ # (type: Optional[float], default: 1.0)
65
+ max_norm: 1.0
66
+
67
+ # (type: float, default: 4e-05)
68
+ min_lr: 4.0e-05
69
+
70
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
71
+ eval:
72
+ # Number of optimizer steps between evaluation calls (type: int, default: 1000)
73
+ interval: 50
74
+
75
+ # Number of tokens to generate (type: Optional[int], default: null)
76
+ max_new_tokens:
77
+
78
+ # Number of iterations (type: int, default: 100)
79
+ max_iters: 200
80
+
81
+ # Whether to evaluate on the validation set at the beginning of the training
82
+ initial_validation: false
83
+
84
+ # Whether to evaluate on the validation set at the end the training
85
+ final_validation: true
86
+
87
+ # Optimizer-related arguments
88
+ optimizer:
89
+ class_path: torch.optim.AdamW
90
+
91
+ init_args:
92
+ # (type: float, default: 0.001)
93
+ lr: 4e-4
94
+
95
+ # (type: float, default: 0.01)
96
+ weight_decay: 0.1
97
+
98
+ # (type: tuple, default: (0.9,0.999))
99
+ betas:
100
+ - 0.9
101
+ - 0.95
102
+
103
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
104
+ devices: auto
105
+
106
+ # How many nodes to use. (type: int, default: 1)
107
+ num_nodes: 1
108
+
109
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
110
+ # module require this. (type: Optional[Path], default: null)
111
+ tokenizer_dir: checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
112
+
113
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
114
+ logger_name: tensorboard
115
+
116
+ # The random seed to use for reproducibility. (type: int, default: 42)
117
+ seed: 42
config_hub/eval/tinyllama_cl_ppl.yaml ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # The name of the model to pretrain. Choose from names in ``litgpt.config``. Mutually exclusive with
2
+ # ``model_config``. (type: Optional[str], default: null)
3
+ model_name: tiny-llama-1.1b
4
+
5
+ # A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
6
+ # ``model_config``. (type: Optional[Config], default: null)
7
+ model_config:
8
+
9
+ # Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
10
+ # /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
11
+ out_dir: out/pretrain/2407
12
+
13
+ # The precision to use for pretraining. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
14
+ precision: bf16-mixed
15
+
16
+ # Optional path to a checkpoint directory to initialize the model from.
17
+ # Useful for continued pretraining. Mutually exclusive with ``resume``. (type: Optional[Path], default: null)
18
+ initial_checkpoint_dir: checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
19
+
20
+ # Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
21
+ # from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
22
+ # ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
23
+ # (type: Union[bool, Literal["auto"], Path], default: False)
24
+ resume: false
25
+
26
+ # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
27
+ data:
28
+ class_path: litgpt.data.Arxiv
29
+ init_args:
30
+ ppl: true
31
+ data_path:
32
+
33
+ # Data-Dir
34
+ data_dir:
35
+
36
+ # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
37
+ train:
38
+ # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
39
+ save_interval: 100
40
+
41
+ # Number of iterations between logging calls (type: int, default: 1)
42
+ log_interval: 1
43
+
44
+ # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 512)
45
+ global_batch_size: 512
46
+
47
+ # Number of samples per data-parallel rank (type: int, default: 4)
48
+ micro_batch_size: 4
49
+
50
+ # Number of iterations with learning rate warmup active (type: int, default: 2000)
51
+ lr_warmup_steps: 20
52
+
53
+ # Number of epochs to train on (type: Optional[int], default: null)
54
+ epochs:
55
+
56
+ # Total number of tokens to train on (type: Optional[int], default: 3000000000000)
57
+ max_tokens: 209715200
58
+
59
+ # Limits the number of optimizer steps to run. (type: Optional[int], default: null)
60
+ max_steps:
61
+
62
+ # Limits the length of samples. Off by default (type: Optional[int], default: null)
63
+ max_seq_length: 2048
64
+
65
+ # Whether to tie the embedding weights with the language modeling head weights. (type: Optional[bool], default: False)
66
+ tie_embeddings:
67
+
68
+ # (type: Optional[float], default: 1.0)
69
+ max_norm: 1.0
70
+
71
+ # (type: float, default: 4e-05)
72
+ min_lr: 4.0e-05
73
+
74
+ # Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
75
+ eval:
76
+ # Number of optimizer steps between evaluation calls (type: int, default: 1000)
77
+ interval: 50
78
+
79
+ # Number of tokens to generate (type: Optional[int], default: null)
80
+ max_new_tokens:
81
+
82
+ # Number of iterations (type: int, default: 100)
83
+ max_iters: 200
84
+
85
+ # Whether to evaluate on the validation set at the beginning of the training
86
+ initial_validation: false
87
+
88
+ # Whether to evaluate on the validation set at the end the training
89
+ final_validation: true
90
+
91
+ # Optimizer-related arguments
92
+ optimizer:
93
+ class_path: torch.optim.AdamW
94
+
95
+ init_args:
96
+ # (type: float, default: 0.001)
97
+ lr: 4e-4
98
+
99
+ # (type: float, default: 0.01)
100
+ weight_decay: 0.1
101
+
102
+ # (type: tuple, default: (0.9,0.999))
103
+ betas:
104
+ - 0.9
105
+ - 0.95
106
+
107
+ # How many devices/GPUs to use. Uses all GPUs by default. (type: Union[int, str], default: auto)
108
+ devices: auto
109
+
110
+ # How many nodes to use. (type: int, default: 1)
111
+ num_nodes: 1
112
+
113
+ # Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
114
+ # module require this. (type: Optional[Path], default: null)
115
+ tokenizer_dir: checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
116
+
117
+ # The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: tensorboard)
118
+ logger_name: tensorboard
119
+
120
+ # The random seed to use for reproducibility. (type: int, default: 42)
121
+ seed: 42
122
+
123
+ multi_month: true
extensions/thunder/strategies/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .thunder_ddp import ThunderDDPStrategy # noqa: F401
2
+ from .thunder_fsdp import ThunderFSDPStrategy # noqa: F401
extensions/thunder/strategies/thunder_ddp.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Fabric Strategy to support Thunder DDP: To be upstreamed into Fabric eventually."""
2
+
3
+ from contextlib import nullcontext
4
+ from datetime import timedelta
5
+ from typing import TYPE_CHECKING, Any, ContextManager, Dict, List, Optional, Tuple, Union
6
+
7
+ import torch
8
+ import torch.distributed
9
+ from lightning.fabric.accelerators.accelerator import Accelerator
10
+ from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
11
+ from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
12
+ from lightning.fabric.plugins.io.checkpoint_io import CheckpointIO
13
+ from lightning.fabric.plugins.precision import Precision
14
+ from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
15
+ from lightning.fabric.strategies.parallel import ParallelStrategy
16
+ from lightning.fabric.strategies.strategy import TBroadcast, _BackwardSyncControl
17
+ from lightning.fabric.utilities.distributed import (
18
+ ReduceOp,
19
+ _distributed_is_initialized,
20
+ _get_default_process_group_backend_for_device,
21
+ _init_dist_connection,
22
+ _sync_ddp_if_available,
23
+ )
24
+ from lightning.fabric.utilities.rank_zero import rank_zero_only
25
+ from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
26
+ from torch import Tensor
27
+ from torch.nn import Module
28
+ from typing_extensions import override
29
+
30
+ from litgpt.utils import _THUNDER_AVAILABLE
31
+
32
+ if TYPE_CHECKING:
33
+ from thunder import Executor
34
+
35
+
36
+ class ThunderDDPStrategy(ParallelStrategy):
37
+ def __init__(
38
+ self,
39
+ accelerator: Optional[Accelerator] = None,
40
+ parallel_devices: Optional[List[torch.device]] = None,
41
+ cluster_environment: Optional[ClusterEnvironment] = None,
42
+ checkpoint_io: Optional[CheckpointIO] = None,
43
+ precision: Optional[Precision] = None,
44
+ jit: bool = True,
45
+ executors: Optional[Tuple[Union["Executor", str], ...]] = None,
46
+ process_group_backend: Optional[str] = None,
47
+ timeout: Optional[timedelta] = default_pg_timeout,
48
+ **kwargs: Any,
49
+ ):
50
+ r"""Strategy for Replicated Data Parallel provided by Lightning Thunder.
51
+
52
+ .. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
53
+
54
+ Arguments:
55
+ jit: Whether to automatically call ``thunder.jit(model)`` if necessary. Disable this if you are manually
56
+ jitting a function that includes the model.
57
+
58
+ executors: The list of Thunder executors to enable. They can be either string aliases for the executors
59
+ or the actual executor instances.
60
+
61
+ \**kwargs: See available parameters in :func:`thunder.distributed.ddp`.
62
+
63
+ """
64
+ if not _THUNDER_AVAILABLE:
65
+ raise ModuleNotFoundError(str(_THUNDER_AVAILABLE))
66
+ super().__init__(accelerator=accelerator, checkpoint_io=checkpoint_io, precision=precision)
67
+ self.parallel_devices = parallel_devices
68
+ self.cluster_environment: Optional[ClusterEnvironment] = cluster_environment
69
+
70
+ if not jit and executors is not None:
71
+ raise ValueError(f"Passing executors={executors} doesn't have an effect with `jit={jit}`")
72
+ self.jit = jit
73
+ self.executors = executors
74
+ self._num_nodes = 1
75
+ self._process_group_backend: Optional[str] = process_group_backend
76
+ self._timeout: Optional[timedelta] = timeout
77
+ self._backward_sync_control = _ThunderDataParalellBackwardSyncControl()
78
+ self._ddp_kwargs = kwargs
79
+
80
+ @property
81
+ @override
82
+ def root_device(self) -> torch.device:
83
+ assert self.parallel_devices is not None
84
+ return self.parallel_devices[self.local_rank]
85
+
86
+ @property
87
+ def num_nodes(self) -> int:
88
+ return self._num_nodes
89
+
90
+ @num_nodes.setter
91
+ def num_nodes(self, num_nodes: int) -> None:
92
+ # note that world ranks is related to num_nodes, when resetting it, need to reset world ranks
93
+ self._num_nodes = num_nodes
94
+
95
+ @property
96
+ def num_processes(self) -> int:
97
+ return len(self.parallel_devices) if self.parallel_devices is not None else 0
98
+
99
+ @property
100
+ @override
101
+ def distributed_sampler_kwargs(self) -> Dict[str, Any]:
102
+ return {"num_replicas": self.num_nodes * self.num_processes, "rank": self.global_rank}
103
+
104
+ @override
105
+ def _configure_launcher(self) -> None:
106
+ assert self.cluster_environment is not None
107
+ if not self.cluster_environment.creates_processes_externally:
108
+ self._launcher = _SubprocessScriptLauncher(self.cluster_environment, self.num_processes, self.num_nodes)
109
+
110
+ @property
111
+ def process_group_backend(self) -> Optional[str]:
112
+ return self._process_group_backend
113
+
114
+ @override
115
+ def _configure_launcher(self) -> None:
116
+ assert self.cluster_environment is not None
117
+ self._launcher = _SubprocessScriptLauncher(self.cluster_environment, self.num_processes, self.num_nodes)
118
+
119
+ @override
120
+ def setup_environment(self) -> None:
121
+ super().setup_environment()
122
+ self._setup_distributed()
123
+
124
+ @override
125
+ def setup_module(self, module: Module) -> Module:
126
+ import thunder
127
+
128
+ if (cd := thunder.compile_data(module)) is not None:
129
+ # the module was already jitted
130
+ if thunder.compile_stats(module).last_traces is not None:
131
+ raise RuntimeError(
132
+ "You already called `thunder.jit()` and generated an execution trace. It's too late to apply the"
133
+ " DDP transform. Remove the `forward` call before `fabric.setup()`"
134
+ )
135
+ assert cd.is_module # sanity check
136
+ ddp_module = thunder.distributed.ddp(cd.fn, **self._ddp_kwargs)
137
+ # update the compile data state
138
+ cd.fn = ddp_module
139
+ cd.process_group_for_ddp = ddp_module.process_group_for_ddp
140
+ return module
141
+ else:
142
+ module = thunder.distributed.ddp(module, **self._ddp_kwargs)
143
+ if not self.jit:
144
+ return module
145
+ return thunder.jit(module, executors=self.executors)
146
+
147
+ @override
148
+ def module_to_device(self, module: Module) -> None:
149
+ module.to(self.root_device)
150
+
151
+ @override
152
+ def all_reduce(
153
+ self, tensor: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = "mean"
154
+ ) -> Tensor:
155
+ if isinstance(tensor, Tensor):
156
+ return _sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
157
+ return tensor
158
+
159
+ @override
160
+ def barrier(self, *args: Any, **kwargs: Any) -> None:
161
+ if not _distributed_is_initialized():
162
+ return
163
+ if torch.distributed.get_backend() == "nccl":
164
+ torch.distributed.barrier(device_ids=[self.root_device.index])
165
+ else:
166
+ torch.distributed.barrier()
167
+
168
+ @override
169
+ def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
170
+ if not _distributed_is_initialized():
171
+ return obj
172
+
173
+ obj = [obj]
174
+ torch.distributed.broadcast_object_list(obj, src)
175
+ return obj[0]
176
+
177
+ def _setup_distributed(self) -> None:
178
+ self._set_world_ranks()
179
+ self._process_group_backend = self._get_process_group_backend()
180
+ assert self.cluster_environment is not None
181
+ _init_dist_connection(self.cluster_environment, self._process_group_backend, timeout=self._timeout)
182
+
183
+ def _get_process_group_backend(self) -> str:
184
+ return self._process_group_backend or _get_default_process_group_backend_for_device(self.root_device)
185
+
186
+ def _set_world_ranks(self) -> None:
187
+ if self.cluster_environment is not None:
188
+ self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)
189
+ self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)
190
+ # `LightningEnvironment.set_global_rank` will do this too, but we cannot rely on that implementation detail
191
+ # additionally, for some implementations, the setter is a no-op, so it's safer to access the getter
192
+ rank_zero_only.rank = utils_rank_zero_only.rank = self.global_rank
193
+
194
+
195
+ class _ThunderDataParalellBackwardSyncControl(_BackwardSyncControl):
196
+ def __init__(self):
197
+ self._enabled = False
198
+
199
+ @override
200
+ def no_backward_sync(self, module: Module, enabled: bool) -> ContextManager:
201
+ """
202
+ In Thunder, we cannot use ``module.no_sync()`` because reduction happens at the end of the context manager.
203
+ It assumes that the user will reuse it across all gradient accumulation iterations:
204
+
205
+ .. code-block:: python
206
+
207
+ with model.no_sync():
208
+ for _ in range(len(gradient_accumulation_iters)):
209
+ fwd()
210
+ bwd() # uses no-sync-backward trace
211
+ fwd()
212
+ bwd() # uses regular-backward trace
213
+
214
+ However, Fabric is designed to the context manager every iteration:
215
+
216
+ .. code-block:: python
217
+
218
+ for i in range(iters):
219
+ is_accumulating = (i + 1) % gradient_accumulation_iters != 0
220
+ ctx = model.no_sync() if is_accumulating else nullcontext()
221
+ with ctx:
222
+ fwd()
223
+ bwd()
224
+
225
+ So we need to be smart about when to sync grads based on the ``enabled`` value.
226
+
227
+ More info in https://github.com/Lightning-AI/lit-thunder-LEGACY/issues/2085
228
+ """
229
+ if not getattr(module, "use_ddp", False) and not getattr(module, "use_fsdp", False):
230
+ raise TypeError(
231
+ "Blocking backward sync is only possible if the module passed to"
232
+ f" `{self.__class__.__name__}.no_backward_sync` is applied DDP or FSDP."
233
+ f" Got: {module.__class__.__name__}."
234
+ )
235
+
236
+ from thunder.distributed import skip_data_parallel_grad_sync
237
+
238
+ previous, self._enabled = self._enabled, enabled
239
+ if enabled:
240
+ return skip_data_parallel_grad_sync()
241
+ if not enabled and previous:
242
+ return _SyncGradsContextManager(module)
243
+ return nullcontext()
244
+
245
+
246
+ class _SyncGradsContextManager:
247
+ def __init__(self, module: Module) -> None:
248
+ self._module = module
249
+
250
+ @override
251
+ def __enter__(self) -> None:
252
+ from thunder.distributed import _sync_grads
253
+
254
+ _sync_grads(self._module)
255
+
256
+ @override
257
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
258
+ pass
extensions/thunder/strategies/thunder_fsdp.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Fabric Strategy to support Thunder FSDP: To be upstreamed into Fabric eventually."""
2
+
3
+ import shutil
4
+ from contextlib import ExitStack, nullcontext
5
+ from pathlib import Path
6
+ from typing import TYPE_CHECKING, Any, Callable, ContextManager, Dict, List, Literal, Optional, Tuple, Union
7
+
8
+ import torch
9
+ from lightning.fabric.accelerators.accelerator import Accelerator
10
+ from lightning.fabric.plugins.environments.cluster_environment import ClusterEnvironment
11
+ from lightning.fabric.plugins.io.checkpoint_io import CheckpointIO
12
+ from lightning.fabric.plugins.precision import Precision
13
+ from lightning.fabric.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
14
+ from lightning.fabric.strategies.parallel import ParallelStrategy
15
+ from lightning.fabric.strategies.strategy import TBroadcast, _apply_filter, _Sharded, _validate_keys_for_strict_loading
16
+ from lightning.fabric.utilities.distributed import (
17
+ ReduceOp,
18
+ _distributed_is_initialized,
19
+ _get_default_process_group_backend_for_device,
20
+ _init_dist_connection,
21
+ _sync_ddp_if_available,
22
+ )
23
+ from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_2
24
+ from lightning.fabric.utilities.load import _METADATA_FILENAME, _move_state_into
25
+ from lightning.fabric.utilities.rank_zero import rank_zero_only
26
+ from lightning.fabric.utilities.seed import reset_seed
27
+ from lightning.fabric.utilities.types import _PATH, _Stateful
28
+ from lightning_utilities.core.rank_zero import rank_zero_only as utils_rank_zero_only
29
+ from torch import Tensor
30
+ from torch.nn import Module
31
+ from torch.optim import Optimizer
32
+ from typing_extensions import override
33
+
34
+ from extensions.thunder.strategies.thunder_ddp import _ThunderDataParalellBackwardSyncControl
35
+ from litgpt.utils import _THUNDER_AVAILABLE
36
+
37
+ if TYPE_CHECKING:
38
+ from thunder import Executor
39
+ from thunder.distributed import FSDPBucketingStrategy, FSDPType
40
+ from thunder.distributed.checkpoint import StateDictOptions
41
+
42
+ _FSDP_TYPE = Union[FSDPType, Literal["ZERO2", "ZERO3"]]
43
+ _BUCKETING_STRATEGY = Union[FSDPBucketingStrategy, Literal["NONE", "LAYER", "BLOCK"]]
44
+
45
+
46
+ class ThunderFSDPStrategy(ParallelStrategy, _Sharded):
47
+ def __init__(
48
+ self,
49
+ accelerator: Optional[Accelerator] = None,
50
+ parallel_devices: Optional[List[torch.device]] = None,
51
+ cluster_environment: Optional[ClusterEnvironment] = None,
52
+ checkpoint_io: Optional[CheckpointIO] = None,
53
+ precision: Optional[Precision] = None,
54
+ jit: bool = True,
55
+ executors: Optional[Tuple[Union["Executor", str], ...]] = None,
56
+ sharding_strategy: "_FSDP_TYPE" = "ZERO3",
57
+ bucketing_strategy: "_BUCKETING_STRATEGY" = "NONE",
58
+ state_dict_type: Literal["full", "sharded"] = "sharded",
59
+ **kwargs: Any,
60
+ ):
61
+ r"""Strategy for Fully Sharded Data Parallel provided by Lightning Thunder.
62
+
63
+ .. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
64
+
65
+ Fully Sharded Training shards the entire model across all available GPUs, allowing you to scale model
66
+ size, whilst using efficient communication to reduce overhead. In practice, this means we can remain
67
+ at parity with PyTorch DDP, whilst scaling our model sizes dramatically.
68
+
69
+ Arguments:
70
+ jit: Whether to automatically call ``thunder.jit(model)`` if necessary. Disable this if you are manually
71
+ jitting a function that includes the model.
72
+
73
+ executors: The list of Thunder executors to enable. They can be either string aliases for the executors
74
+ or the actual executor instances.
75
+
76
+ sharding_strategy: Select whether to shard model parameters, gradients, optimizer states, or a combination
77
+ of them:
78
+
79
+ - ``"ZERO3"``: Shards model parameters, gradients, and optimizer states (default).
80
+ - ``"ZERO2"``: Shards gradients and optimizer states only. Model parameters get replicated.
81
+
82
+ Also accepts a :class:`thunder.distributed.FSDPType` enum value.
83
+
84
+ bucketing_strategy: Enables combining the collective operations for sets of layers.
85
+
86
+ - ``"NONE"``: No bucketing (default).
87
+ - ``"LAYER"``: Create buckets per layer class.
88
+ - ``"BLOCK"``: Create buckets per layer block.
89
+
90
+ Also accepts a :class:`thunder.distributed.FSDPBucketingStrategy` enum value.
91
+
92
+ state_dict_type: The format in which the state of the model and optimizers gets saved into the checkpoint.
93
+
94
+ - ``"full"``: The full weights and optimizer states get assembled on rank 0 and saved to a single file
95
+ (default).
96
+ - ``"sharded"``: Each rank saves its shard of weights and optimizer states to a file. The checkpoint is
97
+ a folder with as many files as the world size.
98
+
99
+ \**kwargs: See available parameters in :func:`thunder.distributed.fsdp`.
100
+
101
+ """
102
+ if not _TORCH_GREATER_EQUAL_2_2:
103
+ raise ImportError("Thunder's FSDP strategy requires PyTorch 2.2 or higher.")
104
+ if not _THUNDER_AVAILABLE:
105
+ raise ModuleNotFoundError(str(_THUNDER_AVAILABLE))
106
+ super().__init__(accelerator=accelerator, checkpoint_io=checkpoint_io, precision=precision)
107
+ self.parallel_devices = parallel_devices
108
+ self.cluster_environment: Optional[ClusterEnvironment] = cluster_environment
109
+ from thunder.distributed import FSDPBucketingStrategy, FSDPType
110
+
111
+ self.sharding_strategy = (
112
+ FSDPType[sharding_strategy.upper()] if isinstance(sharding_strategy, str) else sharding_strategy
113
+ )
114
+ self.bucketing_strategy = (
115
+ FSDPBucketingStrategy[bucketing_strategy.upper()]
116
+ if isinstance(bucketing_strategy, str)
117
+ else bucketing_strategy
118
+ )
119
+ if not jit and executors is not None:
120
+ raise ValueError(f"Passing executors={executors} doesn't have an effect with `jit={jit}`")
121
+ self.jit = jit
122
+ self.executors = executors
123
+ self._state_dict_type = state_dict_type
124
+ self._backward_sync_control = _ThunderDataParalellBackwardSyncControl()
125
+ self._fsdp_kwargs = kwargs
126
+
127
+ @property
128
+ @override
129
+ def root_device(self) -> torch.device:
130
+ assert self.parallel_devices is not None
131
+ return self.parallel_devices[self.local_rank]
132
+
133
+ @property
134
+ def num_nodes(self) -> int:
135
+ return 1
136
+
137
+ @property
138
+ def num_processes(self) -> int:
139
+ return len(self.parallel_devices) if self.parallel_devices is not None else 0
140
+
141
+ @property
142
+ @override
143
+ def distributed_sampler_kwargs(self) -> Dict[str, Any]:
144
+ return {"num_replicas": self.num_nodes * self.num_processes, "rank": self.global_rank}
145
+
146
+ @override
147
+ def _configure_launcher(self) -> None:
148
+ assert self.cluster_environment is not None
149
+ if not self.cluster_environment.creates_processes_externally:
150
+ self._launcher = _SubprocessScriptLauncher(self.cluster_environment, self.num_processes, self.num_nodes)
151
+
152
+ @override
153
+ def setup_environment(self) -> None:
154
+ super().setup_environment()
155
+ self._setup_distributed()
156
+
157
+ @override
158
+ def setup_module(self, module: Module) -> Module:
159
+ import thunder
160
+
161
+ if (cd := thunder.compile_data(module)) is not None:
162
+ # the module was already jitted
163
+ if thunder.compile_stats(module).last_traces is not None:
164
+ raise RuntimeError(
165
+ "You already called `thunder.jit()` and generated an execution trace. It's too late to apply the"
166
+ " FSDP transform. Remove the `forward` call before `fabric.setup()`"
167
+ )
168
+ assert cd.is_module # sanity check
169
+ fsdp_module = thunder.distributed.fsdp(
170
+ cd.fn,
171
+ device=self.root_device,
172
+ sharding_strategy=self.sharding_strategy,
173
+ bucketing_strategy=self.bucketing_strategy,
174
+ **self._fsdp_kwargs,
175
+ )
176
+ # update the compile data state
177
+ cd.fn = fsdp_module
178
+ cd.process_group_for_ddp = fsdp_module.process_group_for_ddp
179
+ return module
180
+ else:
181
+ module = thunder.distributed.fsdp(
182
+ module,
183
+ device=self.root_device,
184
+ sharding_strategy=self.sharding_strategy,
185
+ bucketing_strategy=self.bucketing_strategy,
186
+ **self._fsdp_kwargs,
187
+ )
188
+ if not self.jit:
189
+ return module
190
+ return thunder.jit(module, executors=self.executors)
191
+
192
+ @override
193
+ def module_to_device(self, module: Module) -> None:
194
+ pass
195
+
196
+ @override
197
+ def module_init_context(self, empty_init: Optional[bool] = None) -> ContextManager:
198
+ precision_init_ctx = self.precision.module_init_context()
199
+ module_sharded_ctx = self.module_sharded_context()
200
+ stack = ExitStack()
201
+ if empty_init:
202
+ # Materialization happens in `setup`. When modules get wrapped by FSDP
203
+ stack.enter_context(torch.device("meta"))
204
+ stack.enter_context(precision_init_ctx)
205
+ stack.enter_context(module_sharded_ctx)
206
+ return stack
207
+
208
+ @override
209
+ def module_sharded_context(self) -> ContextManager:
210
+ return nullcontext()
211
+
212
+ @override
213
+ def all_reduce(
214
+ self, tensor: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = "mean"
215
+ ) -> Tensor:
216
+ if isinstance(tensor, Tensor):
217
+ return _sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
218
+ return tensor
219
+
220
+ @override
221
+ def barrier(self, *args: Any, **kwargs: Any) -> None:
222
+ if not _distributed_is_initialized():
223
+ return
224
+ if torch.distributed.get_backend() == "nccl":
225
+ torch.distributed.barrier(device_ids=[self.root_device.index])
226
+ else:
227
+ torch.distributed.barrier()
228
+
229
+ @override
230
+ def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
231
+ if not _distributed_is_initialized():
232
+ return obj
233
+
234
+ obj = [obj]
235
+ torch.distributed.broadcast_object_list(obj, src)
236
+ return obj[0]
237
+
238
+ @override
239
+ def clip_gradients_norm(
240
+ self,
241
+ module: Module,
242
+ optimizer: Optimizer,
243
+ max_norm: Union[float, int],
244
+ norm_type: Union[float, int] = 2.0,
245
+ error_if_nonfinite: bool = True,
246
+ ) -> Tensor:
247
+ raise NotImplementedError
248
+
249
+ @override
250
+ def save_checkpoint(
251
+ self,
252
+ path: _PATH,
253
+ state: Dict[str, Union[Module, Optimizer, Any]],
254
+ storage_options: Optional[Any] = None,
255
+ filter: Optional[Dict[str, Callable[[str, Any], bool]]] = None,
256
+ ) -> None:
257
+ if storage_options is not None:
258
+ raise TypeError(
259
+ "`FSDPStrategy.save_checkpoint(..., storage_options=...)` is not supported because"
260
+ " `FSDPStrategy` does not use the `CheckpointIO`."
261
+ )
262
+ if filter is not None:
263
+ raise NotImplementedError("Filtering checkpoint paths is not implemented")
264
+
265
+ # broadcast the path from rank 0 to ensure all the states are saved in a common path
266
+ path = Path(self.broadcast(path))
267
+ if path.is_dir() and self._state_dict_type == "full" and not _is_sharded_checkpoint(path):
268
+ raise IsADirectoryError(f"The checkpoint path exists and is a directory: {path}")
269
+
270
+ from thunder.distributed.checkpoint import StateDictOptions, has_fsdp_modules, save
271
+
272
+ modules = [module for module in state.values() if has_fsdp_modules(module)]
273
+ if len(modules) == 0:
274
+ raise ValueError(
275
+ "Could not find a FSDP model in the provided checkpoint state. Please provide the model as"
276
+ " part of the state like so: `save_checkpoint(..., state={'model': model, ...})`. Make sure"
277
+ " you set up the model (and optimizers if any) through the strategy before saving the checkpoint."
278
+ )
279
+ if len(modules) > 1:
280
+ raise ValueError(
281
+ "Found multiple FSDP models in the given state. Saving checkpoints with FSDP is"
282
+ " currently limited to a single model per checkpoint. To save multiple models, call the"
283
+ " save method for each model separately with a different path."
284
+ )
285
+
286
+ if self._state_dict_type == "sharded":
287
+ if _is_full_checkpoint(path):
288
+ path.unlink()
289
+ path.mkdir(parents=True, exist_ok=True)
290
+
291
+ options = StateDictOptions(full_state_dict=False, cpu_offload=True, rank0_only=False)
292
+ converted_state, metadata = _get_state_dict(state, filter, options, self.local_rank)
293
+ save(converted_state, path)
294
+ if self.global_rank == 0:
295
+ torch.save(metadata, path / _METADATA_FILENAME)
296
+
297
+ elif self._state_dict_type == "full":
298
+ if _is_sharded_checkpoint(path):
299
+ shutil.rmtree(path)
300
+
301
+ options = StateDictOptions(full_state_dict=True, cpu_offload=True, rank0_only=True)
302
+ converted_state, metadata = _get_state_dict(state, filter, options, self.local_rank)
303
+ converted_state.update(metadata)
304
+ if self.global_rank == 0:
305
+ torch.save(converted_state, path)
306
+ else:
307
+ raise ValueError(f"Unknown state_dict_type: {self._state_dict_type}")
308
+
309
+ @override
310
+ def load_checkpoint(
311
+ self,
312
+ path: _PATH,
313
+ state: Optional[Union[Module, Optimizer, Dict[str, Union[Module, Optimizer, Any]]]] = None,
314
+ strict: bool = True,
315
+ ) -> Dict[str, Any]:
316
+ if not state:
317
+ raise ValueError(
318
+ f"Got `FSDPStrategy.load_checkpoint(..., state={state!r})` but a state with at least"
319
+ " a model instance to reload is required. Pass it in like so:"
320
+ " `FSDPStrategy.load_checkpoint(..., state={'model': model, ...})`"
321
+ )
322
+ # broadcast the path from rank 0 to ensure all the states are loaded from a common path
323
+ path = Path(self.broadcast(path))
324
+
325
+ from thunder.distributed.checkpoint import StateDictOptions, has_fsdp_modules, load, load_model_state_dict
326
+
327
+ if isinstance(state, Module):
328
+ if not _is_full_checkpoint(path):
329
+ raise ValueError(
330
+ "Failed to load checkpoint directly into the model. The given path must be a single file"
331
+ f" containing the full state dict: {path}"
332
+ )
333
+ state_dict = torch.load(str(path), mmap=True, map_location="cpu")
334
+ options = StateDictOptions(full_state_dict=True, cpu_offload=True, strict=strict, rank0_only=False)
335
+ load_model_state_dict(state_dict, _unwrap_tom(state), options, self.local_rank)
336
+ return {}
337
+
338
+ if isinstance(state, Optimizer):
339
+ raise NotImplementedError(
340
+ "Loading a single optimizer object from a checkpoint is not supported yet with the FSDP strategy."
341
+ )
342
+
343
+ modules = {key: module for key, module in state.items() if has_fsdp_modules(module)}
344
+ if len(modules) == 0:
345
+ raise ValueError(
346
+ "Could not find a FSDP model in the provided checkpoint state. Please provide the model as"
347
+ " part of the state like so: `load_checkpoint(..., state={'model': model, ...})`. Make sure"
348
+ " you set up the model (and optimizers if any) through the strategy before loading the checkpoint."
349
+ )
350
+ if len(modules) > 1:
351
+ raise ValueError(
352
+ "Found multiple FSDP models in the given state. Loading checkpoints with FSDP is"
353
+ " currently limited to a single model per checkpoint. To load multiple models, call the"
354
+ " load method for each model separately with a different path."
355
+ )
356
+ optimizers = {key: optim for key, optim in state.items() if isinstance(optim, Optimizer)}
357
+ module_key, module = list(modules.items())[0]
358
+ module = _unwrap_tom(module)
359
+
360
+ if _is_sharded_checkpoint(path):
361
+ options = StateDictOptions(full_state_dict=False, cpu_offload=True, strict=strict, rank0_only=False)
362
+ # Load the DCP state dict, which requires a holder state dict
363
+ converted_state, _ = _get_state_dict(state, None, options, self.local_rank)
364
+ load(converted_state, path)
365
+ load_model_state_dict(converted_state[module_key], module, options, self.local_rank)
366
+
367
+ # Load metadata (anything not a module or optimizer)
368
+ metadata = torch.load(path / _METADATA_FILENAME)
369
+ requested_metadata_keys = state.keys() - modules.keys() - optimizers.keys()
370
+ _validate_keys_for_strict_loading(requested_metadata_keys, metadata.keys(), strict=strict)
371
+ for key in requested_metadata_keys:
372
+ if key not in metadata:
373
+ continue
374
+ state[key] = metadata.pop(key)
375
+ # return the remaining metadata that wasn't requested as part of `state`
376
+ return metadata
377
+
378
+ if _is_full_checkpoint(path):
379
+ options = StateDictOptions(full_state_dict=True, cpu_offload=True, strict=strict, rank0_only=False)
380
+ if not options.rank0_only or self.local_rank == 0:
381
+ map_location = "cpu" if options.cpu_offload else None
382
+ checkpoint = torch.load(str(path), mmap=True, map_location=map_location)
383
+ load_model_state_dict(checkpoint[module_key], module, options, self.local_rank)
384
+ else:
385
+ checkpoint = {}
386
+
387
+ requested_metadata_keys = state.keys() - modules.keys() - optimizers.keys()
388
+ _validate_keys_for_strict_loading(requested_metadata_keys, checkpoint.keys(), strict=strict)
389
+ # Load metadata (anything not a module or optimizer)
390
+ _move_state_into(source=checkpoint, destination=state, keys=requested_metadata_keys)
391
+ # return the remaining metadata that wasn't requested as part of `state`
392
+ return checkpoint
393
+
394
+ raise ValueError(
395
+ f"The path {str(path)!r} does not point to a valid checkpoint. Make sure the path points to either a"
396
+ " directory with FSDP checkpoint shards, or a single file with a full checkpoint."
397
+ )
398
+
399
+ def _setup_distributed(self) -> None:
400
+ reset_seed()
401
+ self._set_world_ranks()
402
+ process_group_backend = _get_default_process_group_backend_for_device(self.root_device)
403
+ assert self.cluster_environment is not None
404
+ _init_dist_connection(self.cluster_environment, process_group_backend)
405
+
406
+ def _set_world_ranks(self) -> None:
407
+ if self.cluster_environment is not None:
408
+ self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)
409
+ self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)
410
+ # `LightningEnvironment.set_global_rank` will do this too, but we cannot rely on that implementation detail
411
+ # additionally, for some implementations, the setter is a no-op, so it's safer to access the getter
412
+ rank_zero_only.rank = utils_rank_zero_only.rank = self.global_rank
413
+
414
+
415
+ def _is_sharded_checkpoint(path: Path) -> bool:
416
+ """A heuristic check to determine whether the path points to a directory with checkpoint shards."""
417
+ return path.is_dir() and (path / _METADATA_FILENAME).is_file()
418
+
419
+
420
+ def _is_full_checkpoint(path: Path) -> bool:
421
+ return path.is_file()
422
+
423
+
424
+ def _get_state_dict(
425
+ state: Dict[str, Any],
426
+ filter: Optional[Dict[str, Callable[[str, Any], bool]]],
427
+ options: "StateDictOptions",
428
+ rank: int,
429
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
430
+ from thunder.distributed.checkpoint import get_model_state_dict
431
+
432
+ # replace the modules and optimizer objects in the state with their local state dict
433
+ # and separate the user's metadata
434
+ converted_state: Dict[str, Any] = {}
435
+ metadata: Dict[str, Any] = {}
436
+ for key, obj in state.items():
437
+ converted: Any
438
+ if isinstance(obj, Module):
439
+ converted = get_model_state_dict(_unwrap_tom(obj), options, rank)
440
+ target_dict = converted_state
441
+ elif isinstance(obj, Optimizer):
442
+ # TODO: optimizer support
443
+ converted = obj.state_dict()
444
+ target_dict = converted_state
445
+ else: # everything not a module or optimizer is considered metadata
446
+ converted = obj.state_dict() if isinstance(obj, _Stateful) else obj
447
+ target_dict = metadata
448
+ _apply_filter(key, filter or {}, converted, target_dict)
449
+
450
+ return converted_state, metadata
451
+
452
+
453
+ def _unwrap_tom(obj: object) -> object:
454
+ # TODO: this unwrap won't be required when Fabric's `_unwrap_objects` supports Thunder
455
+ from thunder import ThunderModule
456
+
457
+ if isinstance(obj, ThunderModule):
458
+ return obj._model
459
+ return obj
extensions/thunder/unsloth/__init__.py ADDED
File without changes
extensions/thunder/unsloth/executor.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
2
+ import sys
3
+ from pathlib import Path
4
+ from typing import Optional, Tuple
5
+
6
+ import torch
7
+ from torch import Tensor
8
+
9
+ import litgpt.model
10
+ from litgpt.model import LLaMAMLP as OriginalLLaMAMLP
11
+ from litgpt.utils import _THUNDER_AVAILABLE
12
+ from thunder.core.proxies import TensorProxy
13
+ from thunder.core.transforms import get_grad, mean_backward, put_grads
14
+ from thunder.extend import OperatorExecutor, register_executor
15
+ from thunder.torch import ne, sum, true_divide
16
+
17
+ if _THUNDER_AVAILABLE:
18
+ import thunder
19
+ import thunder.torch as ltorch
20
+
21
+ sys.path.append(str(Path(__file__).parent))
22
+
23
+ import kernels
24
+
25
+ unsloth_ex = OperatorExecutor("unsloth", version="0.1")
26
+ register_executor(unsloth_ex)
27
+
28
+
29
+ """
30
+ ====================
31
+ Cross Entropy Loss
32
+ ====================
33
+ """
34
+
35
+
36
+ def unsloth_cross_entropy_meta(logits: TensorProxy, labels: TensorProxy) -> Tuple[TensorProxy, TensorProxy]:
37
+ return (
38
+ TensorProxy(
39
+ shape=(logits.shape[0],),
40
+ # the cross entropy kernel only supports float32
41
+ dtype=thunder.dtypes.float32,
42
+ device=logits.device,
43
+ requires_grad=logits.requires_grad,
44
+ ),
45
+ TensorProxy(shape=(logits.shape[0],), dtype=thunder.dtypes.float32, device=logits.device, requires_grad=False),
46
+ )
47
+
48
+
49
+ unsloth_cross_entropy = unsloth_ex.register_operator(
50
+ "unsloth_cross_entropy", meta=unsloth_cross_entropy_meta, fn=kernels.cross_entropy_loss._cross_entropy_forward_impl
51
+ )
52
+
53
+
54
+ def unsloth_cross_entropy_backward_impl(dlosses: Tensor, logits: Tensor, labels: Tensor, logsumexp: Tensor) -> Tensor:
55
+ # clone() because the kernel writes the grads in the logits
56
+ return kernels.cross_entropy_loss._cross_entropy_backward_impl(dlosses, logits.clone(), logsumexp, labels)
57
+
58
+
59
+ def unsloth_cross_entropy_backward_meta(
60
+ dlosses: TensorProxy, logits: TensorProxy, logsumexp: TensorProxy, labels: TensorProxy
61
+ ) -> TensorProxy:
62
+ return thunder.TensorProxy(like=logits)
63
+
64
+
65
+ unsloth_cross_entropy_backward = unsloth_ex.register_operator(
66
+ "unsloth_cross_entropy_backward", meta=unsloth_cross_entropy_backward_meta, fn=unsloth_cross_entropy_backward_impl
67
+ )
68
+
69
+
70
+ def unsloth_cross_entropy_checker(
71
+ logits: TensorProxy,
72
+ labels: TensorProxy,
73
+ weight: Optional[TensorProxy] = None,
74
+ size_average: Optional[bool] = None,
75
+ ignore_index: int = -100,
76
+ reduce: Optional[bool] = None,
77
+ reduction: str = "mean",
78
+ label_smoothing: float = 0.0,
79
+ ) -> bool:
80
+ return (
81
+ weight is None
82
+ and size_average is None
83
+ and reduce is None
84
+ and reduction in ("none", "mean")
85
+ and ignore_index == -100
86
+ and label_smoothing == 0.0
87
+ and logits.device.type == "cuda"
88
+ and labels.device.type == "cuda"
89
+ )
90
+
91
+
92
+ def cross_entropy_to_unsloth(
93
+ logits: TensorProxy,
94
+ labels: TensorProxy,
95
+ weight: Optional[TensorProxy] = None,
96
+ size_average: Optional[bool] = None,
97
+ ignore_index: int = -100,
98
+ reduce: Optional[bool] = None,
99
+ reduction: str = "mean",
100
+ label_smoothing: float = 0.0,
101
+ ) -> Tuple[TensorProxy, TensorProxy]:
102
+ loss, logsumexp = unsloth_cross_entropy(logits, labels)
103
+ if reduction == "mean":
104
+ # "mean" reduction is not part of the kernel
105
+ # TODO: this doesn't consider that all elements could be masked, causing a division by 0
106
+ n_items = sum(ne(labels, -100))
107
+ loss = true_divide(sum(loss), n_items)
108
+ elif reduction != "none":
109
+ raise NotImplementedError(reduction)
110
+ return loss, logsumexp
111
+
112
+
113
+ def unsloth_cross_entropy_grad(
114
+ logits: TensorProxy,
115
+ labels: TensorProxy,
116
+ weight: Optional[TensorProxy] = None,
117
+ size_average: Optional[bool] = None,
118
+ ignore_index: int = -100,
119
+ reduce: Optional[bool] = None,
120
+ reduction: str = "mean",
121
+ label_smoothing: float = 0.0,
122
+ ) -> TensorProxy:
123
+ loss, logsumexp = cross_entropy_to_unsloth(**locals())
124
+ grad = get_grad(loss)
125
+ if reduction == "mean":
126
+ grad = mean_backward(logsumexp.ndim, logsumexp.shape, (0,), grad)
127
+ logits_grad = unsloth_cross_entropy_backward(grad, logits, labels, logsumexp)
128
+ put_grads((logits,), (logits_grad,))
129
+ return loss
130
+
131
+
132
+ # registers as cross entropy implementation, including the execution transform and now a grad transform
133
+ unsloth_ex.register_implementation(
134
+ ltorch.cross_entropy,
135
+ checker=unsloth_cross_entropy_checker,
136
+ execution_transform=lambda *args: cross_entropy_to_unsloth(*args)[0],
137
+ grad_transform=unsloth_cross_entropy_grad,
138
+ )
139
+
140
+
141
+ """
142
+ =========
143
+ RMSNorm
144
+ =========
145
+
146
+ The RMSNorm kernel is not integrated because it's not numerically equal and it doesn't compute the gradient for the
147
+ weight, just for the input.
148
+ """
149
+
150
+
151
+ """
152
+ ========
153
+ SwiGLU
154
+ ========
155
+ """
156
+
157
+
158
+ def swiglu(e: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
159
+ return torch.nn.functional.silu(e) * g
160
+
161
+
162
+ class ThunderLLaMAMLP(OriginalLLaMAMLP):
163
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
164
+ x_fc_1 = self.fc_1(x)
165
+ x_fc_2 = self.fc_2(x)
166
+ x = swiglu(x_fc_1, x_fc_2)
167
+ return self.proj(x)
168
+
169
+
170
+ litgpt.model.LLaMAMLP = ThunderLLaMAMLP
171
+
172
+
173
+ def swiglu_forward_meta(e: TensorProxy, g: TensorProxy) -> TensorProxy:
174
+ return TensorProxy(like=e)
175
+
176
+
177
+ litgpt_swiglu = unsloth_ex.register_operator("litgpt_swiglu", meta=swiglu_forward_meta, fn=swiglu, replaces=swiglu)
178
+
179
+
180
+ unsloth_swiglu_forward = unsloth_ex.register_operator(
181
+ "unsloth_swiglu_forward", meta=swiglu_forward_meta, fn=lambda *args: kernels.swiglu_fg_kernel(*args)
182
+ )
183
+
184
+
185
+ def unsloth_swiglu_backward_meta(DW: TensorProxy, e: TensorProxy, g: TensorProxy) -> Tuple[TensorProxy, TensorProxy]:
186
+ return TensorProxy(like=g), TensorProxy(like=e)
187
+
188
+
189
+ def unsloth_swiglu_backward_fn(DW: Tensor, e: Tensor, g: Tensor) -> Tuple[Tensor, Tuple]:
190
+ B, T, n_embd = e.shape
191
+ e = e.view(-1, n_embd)
192
+ g = g.view(-1, n_embd)
193
+ DW, e, g = kernels.swiglu_DWf_DW_dfg_kernel(DW, e, g)
194
+ e = e.view(B, T, n_embd)
195
+ g = g.view(B, T, n_embd)
196
+ return g, e
197
+
198
+
199
+ unsloth_swiglu_backward = unsloth_ex.register_operator(
200
+ "unsloth_swiglu_backward", meta=unsloth_swiglu_backward_meta, fn=unsloth_swiglu_backward_fn
201
+ )
202
+
203
+
204
+ def swiglu_to_unsloth_checker(e: TensorProxy, g: TensorProxy) -> bool:
205
+ return e.device.type == "cuda" and g.device.type == "cuda"
206
+
207
+
208
+ def unsloth_swiglu_grad(e: TensorProxy, g: TensorProxy) -> TensorProxy:
209
+ h = unsloth_swiglu_forward(**locals())
210
+ grad = get_grad(h)
211
+ e_grad, g_grad = unsloth_swiglu_backward(grad, e, g)
212
+ put_grads((e, g), (e_grad, g_grad))
213
+ return h
214
+
215
+
216
+ unsloth_ex.register_implementation(
217
+ litgpt_swiglu,
218
+ checker=swiglu_to_unsloth_checker,
219
+ execution_transform=unsloth_swiglu_forward,
220
+ grad_transform=unsloth_swiglu_grad,
221
+ )
222
+
223
+
224
+ """
225
+ ======
226
+ RoPE
227
+ ======
228
+ """
229
+
230
+
231
+ def apply_rope_meta(x: TensorProxy, cos: TensorProxy, sin: TensorProxy) -> TensorProxy:
232
+ return TensorProxy(like=x)
233
+
234
+
235
+ apply_rope = unsloth_ex.register_operator(
236
+ "litgpt_apply_rope", like=apply_rope_meta, fn=litgpt.model.apply_rope, replaces=litgpt.model.apply_rope
237
+ )
238
+
239
+
240
+ def unsloth_apply_rope_meta(
241
+ Q: TensorProxy, cos: TensorProxy, sin: TensorProxy
242
+ ) -> Tuple[TensorProxy, TensorProxy, TensorProxy, int, int, int]:
243
+ batch, n_heads, seq_len, head_dim = Q.shape
244
+ assert seq_len <= cos.shape[-2]
245
+ BLOCK_SIZE, num_warps = kernels.calculate_settings(head_dim // 2)
246
+ div, mod = divmod(n_heads, kernels.rope_embedding.ROPE_GROUP_SIZE)
247
+ n_groups = div + (mod != 0)
248
+ return TensorProxy(like=Q), cos, sin, n_groups, BLOCK_SIZE, num_warps
249
+
250
+
251
+ unsloth_apply_rope = unsloth_ex.register_operator(
252
+ "unsloth_apply_rope", meta=unsloth_apply_rope_meta, fn=kernels._rope_embedding_forward_impl
253
+ )
254
+
255
+
256
+ def unsloth_apply_rope_backward_meta(
257
+ dY: TensorProxy, cos: TensorProxy, sin: TensorProxy, n_groups: int, BLOCK_SIZE: int, num_warps: int
258
+ ) -> TensorProxy:
259
+ return TensorProxy(like=dY)
260
+
261
+
262
+ unsloth_apply_rope_backward = unsloth_ex.register_operator(
263
+ "unsloth_apply_rope_backward", meta=unsloth_apply_rope_backward_meta, fn=kernels._rope_embedding_backward_impl
264
+ )
265
+
266
+
267
+ def apply_rope_to_unsloth_checker(x: TensorProxy, cos: TensorProxy, sin: TensorProxy) -> bool:
268
+ return len(x.shape) == 4 and x.device.type == "cuda" and cos.device.type == "cuda" and sin.device.type == "cuda"
269
+
270
+
271
+ def unsloth_apply_rope_grad(x: TensorProxy, cos: TensorProxy, sin: TensorProxy) -> TensorProxy:
272
+ Q, cos, sin, n_groups, BLOCK_SIZE, num_warps = unsloth_apply_rope(x, cos, sin)
273
+ dY = get_grad(Q)
274
+ dX = unsloth_apply_rope_backward(dY, cos, sin, n_groups, BLOCK_SIZE, num_warps)
275
+ put_grads((x,), (dX,))
276
+ return Q
277
+
278
+
279
+ unsloth_ex.register_implementation(
280
+ apply_rope,
281
+ checker=apply_rope_to_unsloth_checker,
282
+ execution_transform=lambda *args: unsloth_apply_rope(*args)[0],
283
+ grad_transform=unsloth_apply_rope_grad,
284
+ )
extensions/thunder/unsloth/kernels/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .cross_entropy_loss import _cross_entropy_backward_impl, _cross_entropy_forward_impl # noqa: F401
2
+ from .rope_embedding import ROPE_GROUP_SIZE, _rope_embedding_backward_impl, _rope_embedding_forward_impl # noqa: F401
3
+ from .swiglu import swiglu_DWf_DW_dfg_kernel, swiglu_fg_kernel # noqa: F401
4
+ from .utils import calculate_settings # noqa: F401
extensions/thunder/unsloth/kernels/cross_entropy_loss.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import torch
16
+
17
+ from litgpt.utils import _TRITON_AVAILABLE
18
+
19
+ from .utils import MAX_FUSED_SIZE, calculate_settings
20
+
21
+ if _TRITON_AVAILABLE:
22
+ import triton
23
+ import triton.language as tl
24
+
25
+
26
+ @triton.jit
27
+ def _cross_entropy_forward(
28
+ logits_ptr,
29
+ logits_row_stride,
30
+ loss_ptr,
31
+ logsumexp_ptr,
32
+ labels_ptr,
33
+ VOCAB_SIZE: tl.constexpr,
34
+ BLOCK_SIZE: tl.constexpr,
35
+ ):
36
+ """
37
+ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ]
38
+ Pi = exp(xi) / sum(exp(xi))
39
+ CE_i = -y log(p) = -y log[ exp(x) / sum(exp(x)) ]
40
+ = -y [ x - log[sum(exp(x))] ]
41
+ = y * (log[sum(exp(x))] - x)
42
+ If y == 0: CE_i = 0
43
+ If y == 1: CE_i = logsumexp - x
44
+
45
+ logsumexp is also stable
46
+ Take y = log[sum(exp(x))]
47
+ exp(y) = sum(exp(x))
48
+ exp(y) = sum(exp(x - c)*exp(c)) Since e^(x-c)*e^c = e^x
49
+ exp(y) = exp(c)*sum(exp(x - c))
50
+ y = log(exp(c)*sum(exp(x - c)))
51
+ y = c + log[sum(exp(x - c))]
52
+ This means we can set c = max(x) to make sure
53
+ exp(x - c) always is exp(x - max(x)).
54
+ This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1.
55
+ """
56
+ row_idx = tl.program_id(0)
57
+ logits_ptr += row_idx * logits_row_stride.to(tl.int64)
58
+ loss_ptr += row_idx
59
+ logsumexp_ptr += row_idx
60
+ labels_ptr += row_idx
61
+
62
+ col_offsets = tl.arange(0, BLOCK_SIZE)
63
+ mask = col_offsets < VOCAB_SIZE
64
+
65
+ label_idx = tl.load(labels_ptr).to(tl.int32)
66
+ logits = tl.load(logits_ptr + col_offsets, mask=mask, other=-float("inf")).to(tl.float32)
67
+ c = tl.max(logits, 0)
68
+ logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0))
69
+
70
+ if label_idx != -100:
71
+ x = tl.load(logits_ptr + label_idx).to(tl.float32)
72
+ loss = logsumexp - x
73
+ else:
74
+ loss = 0.0
75
+ tl.store(logsumexp_ptr, logsumexp)
76
+ tl.store(loss_ptr, loss)
77
+
78
+
79
+ pass
80
+
81
+
82
+ @triton.jit
83
+ def _chunked_cross_entropy_forward(
84
+ logits_ptr,
85
+ logits_row_stride,
86
+ loss_ptr,
87
+ logsumexp_ptr,
88
+ labels_ptr,
89
+ VOCAB_SIZE: tl.constexpr,
90
+ N_CHUNKS: tl.constexpr,
91
+ BLOCK_SIZE: tl.constexpr,
92
+ ):
93
+ """
94
+ 256K vocab divided in 4 chunks
95
+
96
+ |-65536-| |-65536-| |-65536-| |-65536-|
97
+ |-------| |-------| |-------| |-------|
98
+ |-------| |-------| |-------| |-------|
99
+
100
+ If y == 0: CE_i = 0
101
+ If y == 1: CE_i = logsumexp - x
102
+
103
+ Notice we can do logsumexp for each chunk and then
104
+ logsumexp[chunk_sum(logsumexp)] == logsumexp
105
+
106
+ chunk_sum = log[chunk_sum(logsumexp)]
107
+ = log[exp(logsumexp(a)) + ... + exp(logsumexp(z))]
108
+ = log[exp(log[sum(exp(a))]) + ... + exp(log[sum(exp(z))])]
109
+ = log[sum(exp(a)) + ... + sum(exp(z))]
110
+ = logsumexp(x)
111
+
112
+ This means we can perform a logsumexp for each chunk, then do a
113
+ final logsumexp reduction!
114
+
115
+ Ie do: logsumexp(chunked_logsumexp) - x
116
+ """
117
+ row_idx = tl.program_id(0)
118
+ chunk_idx = tl.program_id(1)
119
+ logits_ptr += row_idx * logits_row_stride.to(tl.int64)
120
+ loss_ptr += row_idx
121
+ logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx
122
+ labels_ptr += row_idx
123
+
124
+ col_offsets = chunk_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
125
+ mask = col_offsets < VOCAB_SIZE
126
+
127
+ label_idx = tl.load(labels_ptr).to(tl.int32)
128
+ logits = tl.load(logits_ptr + col_offsets, mask=mask, other=-float("inf")).to(tl.float32)
129
+ c = tl.max(logits, 0)
130
+ logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0))
131
+
132
+ if chunk_idx == 0:
133
+ # logsumexp(chunked_logsumexp) - x
134
+ # Do the -x separately
135
+ if label_idx != -100:
136
+ x = tl.load(logits_ptr + label_idx).to(tl.float32)
137
+ loss = -1.0 * x
138
+ else:
139
+ loss = 0.0
140
+ tl.store(loss_ptr, loss)
141
+ pass
142
+ tl.store(logsumexp_ptr, logsumexp)
143
+
144
+
145
+ pass
146
+
147
+
148
+ @triton.jit
149
+ def _cross_entropy_backward(
150
+ logits_ptr,
151
+ logits_row_stride,
152
+ dloss_ptr,
153
+ dloss_row_stride,
154
+ logsumexp_ptr,
155
+ labels_ptr,
156
+ VOCAB_SIZE: tl.constexpr,
157
+ BLOCK_SIZE: tl.constexpr,
158
+ ):
159
+ """
160
+ CE_i = -y log(P) = y * (log[sum(exp(x))] - x)
161
+ dC/dx = d/dx (y * log[sum(exp(x))] - x * y)
162
+
163
+ From https://en.wikipedia.org/wiki/LogSumExp
164
+ d/dx logsumexp = exp(x) / sum(exp(x)) = softmax(x)
165
+
166
+ dC/dx = y * exp(x) / sum(exp(x)) - d/dx (x * y)
167
+ dC/dx = y * exp[ log[exp(x) / sum(exp(x))] ] using x = exp(log(x)) trick
168
+ dC/dx = y * exp[x - logsumexp] - d/dx (x * y)
169
+
170
+ If y == 0: dC/dx = 0
171
+ If y == 1 and x == label: dC/dlabel = exp[x - logsumexp] - 1
172
+ If y == 1 and x != label: dC/dx = exp[x - logsumexp]
173
+ """
174
+ row_idx = tl.program_id(0)
175
+ block_idx = tl.program_id(1)
176
+
177
+ logits_ptr += row_idx * logits_row_stride.to(tl.int64)
178
+ dloss_ptr += row_idx * dloss_row_stride
179
+ col_offsets = block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
180
+ mask = col_offsets < VOCAB_SIZE
181
+ label_idx = tl.load(labels_ptr + row_idx).to(tl.int32)
182
+
183
+ if label_idx != -100:
184
+ dloss = tl.load(dloss_ptr)
185
+ else:
186
+ dloss = 0.0
187
+
188
+ x = tl.load(logits_ptr + col_offsets, mask=mask, other=-float("inf")).to(tl.float32)
189
+ logsumexp = tl.load(logsumexp_ptr + row_idx)
190
+ y = tl.exp(x - logsumexp)
191
+ y = tl.where(
192
+ col_offsets == label_idx,
193
+ y - 1.0, # exp(x - logsumexp) - 1
194
+ y, # exp(x - logsumexp)
195
+ )
196
+
197
+ # If y == 0: dC/dx = 0 ==> we already masked it to be = 0, so dloss = 0.
198
+ tl.store(logits_ptr + col_offsets, dloss * y, mask=mask)
199
+
200
+
201
+ pass
202
+
203
+
204
+ def _cross_entropy_forward_impl(logits, labels):
205
+ n_rows, vocab_size = logits.shape
206
+
207
+ div, mod = divmod(vocab_size, MAX_FUSED_SIZE)
208
+ n_chunks = div + (mod != 0)
209
+ losses = torch.empty(n_rows, dtype=torch.float32, device="cuda")
210
+
211
+ if n_chunks == 1:
212
+ # For small vocabs <= 65336 like Llama, Mistral
213
+ BLOCK_SIZE, num_warps = calculate_settings(vocab_size)
214
+ logsumexp = torch.empty(n_rows, dtype=torch.float32, device="cuda")
215
+
216
+ _cross_entropy_forward[(n_rows,)](
217
+ logits,
218
+ logits.stride(0),
219
+ losses,
220
+ logsumexp,
221
+ labels,
222
+ VOCAB_SIZE=vocab_size,
223
+ BLOCK_SIZE=BLOCK_SIZE,
224
+ num_warps=num_warps,
225
+ )
226
+ else:
227
+ # For large vocabs > 65336 like Gemma 256K
228
+ logsumexp = torch.empty(
229
+ (
230
+ n_rows,
231
+ n_chunks,
232
+ ),
233
+ dtype=torch.float32,
234
+ device="cuda",
235
+ )
236
+
237
+ _chunked_cross_entropy_forward[
238
+ (
239
+ n_rows,
240
+ n_chunks,
241
+ )
242
+ ](
243
+ logits,
244
+ logits.stride(0),
245
+ losses,
246
+ logsumexp,
247
+ labels,
248
+ VOCAB_SIZE=vocab_size,
249
+ N_CHUNKS=n_chunks,
250
+ BLOCK_SIZE=MAX_FUSED_SIZE,
251
+ num_warps=32,
252
+ )
253
+ # logsumexp(chunked_logsumexp) - x
254
+ # Do the -x separately
255
+ logsumexp = torch.logsumexp(logsumexp, dim=1) # Row sum
256
+ losses += logsumexp
257
+ losses.masked_fill_(labels == -100, 0) # Don't forget to mask padding out!
258
+
259
+ return losses, logsumexp
260
+
261
+
262
+ def _cross_entropy_backward_impl(dlosses, logits, logsumexp, labels):
263
+ n_rows, vocab_size = logits.shape
264
+
265
+ BLOCK_SIZE = 4096
266
+ div, mod = divmod(vocab_size, BLOCK_SIZE)
267
+ n_blocks = div + (mod != 0)
268
+
269
+ _cross_entropy_backward[
270
+ (
271
+ n_rows,
272
+ n_blocks,
273
+ )
274
+ ](
275
+ logits,
276
+ logits.stride(0),
277
+ dlosses,
278
+ dlosses.stride(0),
279
+ logsumexp,
280
+ labels,
281
+ VOCAB_SIZE=vocab_size,
282
+ BLOCK_SIZE=BLOCK_SIZE,
283
+ num_warps=8,
284
+ )
285
+ return logits
extensions/thunder/unsloth/kernels/rope_embedding.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from litgpt.utils import _TRITON_AVAILABLE
16
+
17
+ from .utils import calculate_settings
18
+
19
+ if _TRITON_AVAILABLE:
20
+ import triton
21
+ import triton.language as tl
22
+
23
+ ROPE_GROUP_SIZE = 4
24
+
25
+
26
+ @triton.heuristics(
27
+ {
28
+ "BACKWARD_PASS": lambda args: args["BACKWARD_PASS"],
29
+ }
30
+ )
31
+ @triton.jit
32
+ def _rope_embedding(
33
+ Q,
34
+ Q_row_stride,
35
+ cos,
36
+ cos_row_stride,
37
+ sin,
38
+ sin_row_stride,
39
+ seqlen,
40
+ head_dim: tl.constexpr,
41
+ n_heads: tl.constexpr,
42
+ BACKWARD_PASS: tl.constexpr,
43
+ BLOCK_SIZE: tl.constexpr,
44
+ ROPE_GROUP_SIZE: tl.constexpr = 4,
45
+ ):
46
+ """
47
+ Calculates the RoPE Embedding quickly
48
+ RoPE is Q * cos + rotate_half(Q) * sin
49
+ See our blog post for more info
50
+ """
51
+ row_position = tl.program_id(0)
52
+ group_head_position = tl.program_id(1)
53
+ col_offsets = tl.arange(0, BLOCK_SIZE)
54
+ half_head_dim = head_dim // 2
55
+ mask = col_offsets < half_head_dim
56
+
57
+ sin1 = tl.load(sin + (row_position % seqlen) * sin_row_stride + half_head_dim * 0 + col_offsets, mask=mask, other=0)
58
+ cos1 = tl.load(cos + (row_position % seqlen) * cos_row_stride + half_head_dim * 0 + col_offsets, mask=mask, other=0)
59
+
60
+ if BACKWARD_PASS:
61
+ # See our blog post for more info.
62
+ sin1 = -sin1
63
+ pass
64
+
65
+ # [TODO] Autotune ROPE_GROUP_SIZE to be 1, 2, 4, 8
66
+ head_start = group_head_position * ROPE_GROUP_SIZE
67
+ head_end = min((head_start + ROPE_GROUP_SIZE), n_heads)
68
+
69
+ # 10% Faster kernel from [HuyNguyen-hust](https://github.com/unslothai/unsloth/pull/238)
70
+ for k in range(head_start, head_end):
71
+ offs_q1 = row_position * Q_row_stride + k * head_dim + col_offsets
72
+ offs_q2 = row_position * Q_row_stride + k * head_dim + col_offsets + half_head_dim
73
+
74
+ # For Gemma - sometimes RoPE must be done in float32 and not bfloat16
75
+ Q1 = tl.load(Q + offs_q1, mask=mask, other=0).to(sin1.dtype)
76
+ Q2 = tl.load(Q + offs_q2, mask=mask, other=0).to(sin1.dtype)
77
+
78
+ tl.store(Q + offs_q1, Q1 * cos1 - Q2 * sin1, mask=mask)
79
+ tl.store(Q + offs_q2, Q2 * cos1 + Q1 * sin1, mask=mask)
80
+ pass
81
+
82
+
83
+ pass
84
+
85
+
86
+ def _rope_embedding_forward_impl(Q, cos, sin):
87
+ Q = Q.transpose(1, 2).clone()
88
+ cos, sin = cos.squeeze(), sin.squeeze()
89
+ batch, seq_len, n_heads, head_dim = Q.shape
90
+ Q = Q.reshape(batch * seq_len, n_heads * head_dim)
91
+ n_rows, n_cols = Q.shape
92
+ assert seq_len <= cos.shape[0]
93
+
94
+ # [TODO] Changing blocksize to head_dim//2 seems to have
95
+ # some concurrency / un-deterministic issues.
96
+ BLOCK_SIZE, num_warps = calculate_settings(head_dim // 2) # (head_dim//2)
97
+
98
+ # group_size = 4 # 4 or 8, too large group_size can hurt performance.
99
+ div, mod = divmod(n_heads, ROPE_GROUP_SIZE)
100
+ n_groups = div + (mod != 0)
101
+
102
+ _rope_embedding[
103
+ (
104
+ n_rows,
105
+ n_groups,
106
+ )
107
+ ](
108
+ Q,
109
+ Q.stride(0),
110
+ cos,
111
+ cos.stride(0),
112
+ sin,
113
+ sin.stride(0),
114
+ seq_len,
115
+ head_dim,
116
+ n_heads,
117
+ BACKWARD_PASS=False,
118
+ BLOCK_SIZE=BLOCK_SIZE,
119
+ num_warps=num_warps,
120
+ )
121
+ Q = Q.view(batch, seq_len, n_heads, head_dim)
122
+ Q = Q.transpose(1, 2)
123
+ return Q, cos, sin, n_groups, BLOCK_SIZE, num_warps
124
+
125
+
126
+ def _rope_embedding_backward_impl(dY, cos, sin, n_groups, BLOCK_SIZE, num_warps):
127
+ dY = dY.transpose(1, 2)
128
+ batch, seq_len, n_heads, head_dim = dY.shape
129
+ dY = dY.reshape(batch * seq_len, n_heads * head_dim)
130
+ # Must be reshape not view
131
+ n_rows, n_cols = dY.shape
132
+
133
+ _rope_embedding[
134
+ (
135
+ n_rows,
136
+ n_groups,
137
+ )
138
+ ](
139
+ dY,
140
+ dY.stride(0),
141
+ cos,
142
+ cos.stride(0),
143
+ sin,
144
+ sin.stride(0),
145
+ seq_len,
146
+ head_dim,
147
+ n_heads,
148
+ BACKWARD_PASS=True,
149
+ BLOCK_SIZE=BLOCK_SIZE,
150
+ num_warps=num_warps,
151
+ )
152
+ dY = dY.view(batch, seq_len, n_heads, head_dim)
153
+ dY = dY.transpose(1, 2)
154
+ return dY
extensions/thunder/unsloth/kernels/swiglu.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import torch
16
+
17
+ from litgpt.utils import _TRITON_AVAILABLE
18
+
19
+ if _TRITON_AVAILABLE:
20
+ import triton
21
+ import triton.language as tl
22
+
23
+
24
+ @triton.jit
25
+ def _fg_kernel(
26
+ e,
27
+ g,
28
+ h,
29
+ n_elements,
30
+ BLOCK_SIZE: tl.constexpr,
31
+ ):
32
+ block_idx = tl.program_id(0)
33
+ offsets = block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
34
+ mask = offsets < n_elements
35
+
36
+ e_row = tl.load(e + offsets, mask=mask, other=0).to(tl.float32)
37
+ g_row = tl.load(g + offsets, mask=mask, other=0) # .to(tl.float32)
38
+
39
+ # f = e * sigmoid(e)
40
+ f_row = e_row * tl.sigmoid(e_row) # e_row / (1 + tl.exp(-e_row))
41
+ f_row = f_row.to(g_row.dtype) # Exact copy from HF
42
+ # h = f * g
43
+ h_row = f_row * g_row
44
+
45
+ # Store h
46
+ tl.store(h + offsets, h_row, mask=mask)
47
+
48
+
49
+ pass
50
+
51
+
52
+ def swiglu_fg_kernel(e, g):
53
+ batch, seq_len, hd = e.shape
54
+ n_elements = e.numel()
55
+ h = torch.empty((batch, seq_len, hd), dtype=e.dtype, device="cuda")
56
+ grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
57
+ _fg_kernel[grid](
58
+ e,
59
+ g,
60
+ h,
61
+ n_elements,
62
+ BLOCK_SIZE=1024,
63
+ )
64
+ return h
65
+
66
+
67
+ pass
68
+
69
+
70
+ @triton.jit
71
+ def _DWf_DW_dfg_kernel(
72
+ DW,
73
+ e,
74
+ g,
75
+ n_elements,
76
+ BLOCK_SIZE: tl.constexpr,
77
+ ):
78
+ """
79
+ e = e.float()
80
+ se = 1.0 / (1.0 + torch.exp(-e))
81
+ f = (se * e).to(dtype)
82
+ h = f * g
83
+ df = DW * f
84
+ dg = DW * g
85
+ de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
86
+ """
87
+ block_idx = tl.program_id(0)
88
+ offsets = block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
89
+ mask = offsets < n_elements
90
+
91
+ DW_row = tl.load(DW + offsets, mask=mask, other=0) # .to(tl.float32)
92
+ e_row = tl.load(e + offsets, mask=mask, other=0).to(tl.float32)
93
+ g_row = tl.load(g + offsets, mask=mask, other=0) # .to(tl.float32)
94
+
95
+ # e = e.float()
96
+ # se = 1.0 / (1.0 + torch.exp(-e))
97
+ se_row = tl.sigmoid(e_row) # 1.0 / (1.0 + tl.exp(-e_row))
98
+ # f = (se * e).to(dtype)
99
+ f_row = se_row * e_row
100
+ f_row = f_row.to(DW_row.dtype)
101
+ # h = f * g
102
+ h_row = f_row * g_row
103
+ # df = DW * f
104
+ df_row = DW_row * f_row
105
+ # dg = DW * g
106
+ dg_row = DW_row * g_row
107
+ # de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
108
+ de_row = dg_row.to(tl.float32) * se_row * (1.0 + e_row * (1.0 - se_row))
109
+ de_row = de_row.to(DW_row.dtype)
110
+
111
+ # Store derivatives in buffers
112
+ tl.store(DW + offsets, h_row, mask=mask) # h = f * g
113
+ tl.store(e + offsets, df_row, mask=mask) # df = DW * f
114
+ tl.store(g + offsets, de_row, mask=mask) # de
115
+
116
+
117
+ pass
118
+
119
+
120
+ def swiglu_DWf_DW_dfg_kernel(DW, e, g):
121
+ batch_seq_len, hd = e.shape
122
+ n_elements = e.numel()
123
+ grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
124
+ _DWf_DW_dfg_kernel[grid](
125
+ DW,
126
+ e,
127
+ g,
128
+ n_elements,
129
+ BLOCK_SIZE=1024,
130
+ )
131
+ return DW, e, g
132
+
133
+
134
+ pass
extensions/thunder/unsloth/kernels/utils.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from litgpt.utils import _TRITON_AVAILABLE
17
+
18
+ if _TRITON_AVAILABLE:
19
+ import triton
20
+
21
+ MAX_FUSED_SIZE = 65536 # 2**16
22
+ next_power_of_2 = triton.next_power_of_2
23
+
24
+
25
+ def calculate_settings(n):
26
+ BLOCK_SIZE = next_power_of_2(n)
27
+ if BLOCK_SIZE > MAX_FUSED_SIZE:
28
+ raise RuntimeError(
29
+ f"Cannot launch Triton kernel since n = {n} exceeds the maximum CUDA blocksize = {MAX_FUSED_SIZE}."
30
+ )
31
+ num_warps = 4
32
+ if BLOCK_SIZE >= 32768:
33
+ num_warps = 32
34
+ elif BLOCK_SIZE >= 8192:
35
+ num_warps = 16
36
+ elif BLOCK_SIZE >= 2048:
37
+ num_warps = 8
38
+ return BLOCK_SIZE, num_warps
39
+
40
+
41
+ pass
extensions/xla/finetune/__init__ ADDED
File without changes
extensions/xla/finetune/adapter.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
2
+
3
+ import os
4
+ import sys
5
+ import time
6
+ from pathlib import Path
7
+ from typing import Dict, List, Tuple
8
+
9
+ import lightning as L
10
+ import torch
11
+ import torch_xla.core.xla_model as xm
12
+ from lightning.fabric.accelerators import XLAAccelerator
13
+ from lightning.fabric.loggers import CSVLogger
14
+ from lightning.fabric.strategies import XLAFSDPStrategy
15
+ from lightning.fabric.utilities import ThroughputMonitor, measure_flops
16
+
17
+ from litgpt.adapter import GPT, Block, Config, adapter_filter, mark_only_adapter_as_trainable
18
+ from litgpt.tokenizer import Tokenizer
19
+ from litgpt.utils import check_valid_checkpoint_dir, chunked_cross_entropy, estimate_flops, lazy_load, num_parameters
20
+
21
+ # support running without installing as a package
22
+ wd = Path(__file__).parents[3].resolve()
23
+ sys.path.append(str(wd))
24
+
25
+ from xla.generate.base import generate # noqa: E402
26
+ from xla.scripts.prepare_alpaca import generate_prompt # noqa: E402
27
+ from xla.utils import rank_print, sequential_load_and_fsdp_wrap # noqa: E402
28
+
29
+ eval_interval = 200
30
+ save_interval = 200
31
+ eval_iters = 100
32
+ eval_max_new_tokens = 100
33
+ log_interval = 1
34
+ devices = XLAAccelerator.auto_device_count()
35
+ # the state of very large models will not fit on the system RAM, this flag can alleviate it by loading it on each rank
36
+ # sequentially
37
+ reduce_cpu_memory_usage_during_load = False
38
+
39
+ # Hyperparameters
40
+ learning_rate = 3e-3
41
+ batch_size = 4
42
+ micro_batch_size = batch_size
43
+ gradient_accumulation_iters = batch_size // micro_batch_size
44
+ assert gradient_accumulation_iters > 0
45
+ epoch_size = 50000 # train dataset size
46
+ num_epochs = 5
47
+ max_iters = num_epochs * (epoch_size // micro_batch_size) // devices
48
+ weight_decay = 0.02
49
+ warmup_steps = 2 * (epoch_size // micro_batch_size) // devices // gradient_accumulation_iters # 2 epochs
50
+
51
+ hparams = {k: v for k, v in locals().items() if isinstance(v, (int, float, str)) and not k.startswith("_")}
52
+
53
+
54
def setup(
    *,
    data_dir: Path = Path("data/alpaca"),
    checkpoint_dir: Path = Path("checkpoints/tiiuae/falcon-7b"),
    out_dir: Path = Path("out/adapter/alpaca"),
    precision: str = "bf16-true",
) -> None:
    """Configure Fabric for (multi-)XLA-device training and launch `main` on every process.

    Args:
        data_dir: Directory with the preprocessed `train.pt` / `test.pt` files.
        checkpoint_dir: Directory with the pretrained checkpoint and tokenizer.
        out_dir: Output directory for CSV logs and adapter checkpoints.
        precision: Fabric precision setting.
    """
    if devices > 1:
        # shard with XLA-FSDP, wrapping and activation-checkpointing at transformer Block granularity
        strategy = XLAFSDPStrategy(
            auto_wrap_policy={Block},
            activation_checkpointing_policy={Block},
            state_dict_type="full",  # change to "sharded" in multi-host environments where the filesystem is not shared
            sequential_save=True,
        )
    else:
        strategy = "auto"
    logger = CSVLogger(out_dir.parent, out_dir.name, flush_logs_every_n_steps=log_interval)
    fabric = L.Fabric(devices=devices, strategy=strategy, precision=precision, loggers=logger)
    rank_print(fabric, hparams)
    fabric.launch(main, data_dir, checkpoint_dir, out_dir)
74
+
75
+
76
def main(fabric: L.Fabric, data_dir: Path, checkpoint_dir: Path, out_dir: Path) -> None:
    """Load data and pretrained weights, finetune the adapter, and save the final checkpoint.

    Runs on every process after ``fabric.launch``.
    """
    check_valid_checkpoint_dir(checkpoint_dir)

    fabric.seed_everything(1337)  # same seed for every process to init model (FSDP)

    if fabric.global_rank == 0:
        os.makedirs(out_dir, exist_ok=True)

    train_data = torch.load(data_dir / "train.pt")
    val_data = torch.load(data_dir / "test.pt")

    # NOTE(review): adapter_start_layer=0 presumably enables adapter weights from the
    # first transformer layer onwards — confirm against litgpt.adapter.Config
    config = Config.from_name(name=checkpoint_dir.name, adapter_start_layer=0)
    checkpoint_path = checkpoint_dir / "lit_model.pth"
    rank_print(fabric, f"Loading model {str(checkpoint_path)!r} with {config.__dict__}")

    if reduce_cpu_memory_usage_during_load:
        # load state dict rank-by-rank to avoid holding it in system RAM on all ranks at once
        model = sequential_load_and_fsdp_wrap(fabric, lambda: GPT(config), checkpoint_path)
    else:
        with fabric.init_module(empty_init=False):
            model = GPT(config)
        checkpoint = lazy_load(checkpoint_path)
        # strict=False because missing keys due to adapter weights not contained in state dict
        model.load_state_dict(checkpoint, strict=False)

    model = fabric.setup_module(model)
    # mark as trainable only after sharding due to https://github.com/pytorch/xla/pull/5484
    mark_only_adapter_as_trainable(model)
    # these are not correct in the sharding case
    rank_print(fabric, f"Number of trainable parameters: {num_parameters(model, requires_grad=True):,}")
    rank_print(fabric, f"Number of non-trainable parameters: {num_parameters(model, requires_grad=False):,}")

    # only the adapter parameters are optimized
    trainable_params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(trainable_params, lr=learning_rate)
    optimizer = fabric.setup_optimizers(optimizer)

    # per-rank seed so each process draws different data batches
    fabric.seed_everything(1337 + fabric.global_rank)

    train_time = time.perf_counter()
    train(fabric, model, optimizer, train_data, val_data, checkpoint_dir, out_dir)
    rank_print(fabric, f"Training time: {(time.perf_counter() - train_time):.2f}s")

    # Save the final checkpoint at the end of training
    save_path = out_dir / "lit_model_adapter_finetuned.pth"
    save_adapter_checkpoint(fabric, model, save_path)
120
+
121
+
122
def train(
    fabric: L.Fabric,
    model: GPT,
    optimizer: torch.optim.Optimizer,
    train_data: List[Dict],
    val_data: List[Dict],
    checkpoint_dir: Path,
    out_dir: Path,
) -> None:
    """Training loop with gradient accumulation, periodic validation and checkpointing.

    Every batch is padded to the longest training sequence so tensor shapes stay
    constant across iterations, which avoids XLA recompilation. ``xm.mark_step()``
    calls delimit the lazily-traced XLA graphs.
    """
    tokenizer = Tokenizer(checkpoint_dir)
    longest_seq_length = get_longest_seq_length(train_data)
    model.max_seq_length = longest_seq_length
    # to avoid recompilation, this script is configured to pad batches to the `longest_seq_length`
    fabric.print(
        f"The longest sequence length in the train data is {longest_seq_length}, the model's maximum sequence length is"
        f" {model.max_seq_length} and context length is {model.config.block_size}"
    )

    # estimate/measure FLOPs on a meta-device copy so no real memory is allocated
    with torch.device("meta"):
        meta_model = GPT(model.config)
        mark_only_adapter_as_trainable(meta_model)
        # "estimated" is not as precise as "measured". Estimated is optimistic but widely used in the wild.
        # When comparing MFU or FLOP numbers with other projects that use estimated FLOPs,
        # consider passing `flops_per_batch=estimated_flops` instead
        estimated_flops = estimate_flops(meta_model, training=True) * micro_batch_size
        rank_print(fabric, f"Estimated TFLOPs: {estimated_flops * fabric.world_size / 1e12:.2f}")
        # this assumes that all samples have a fixed length equal to the longest sequence length
        # which is most likely false during finetuning
        x = torch.randint(0, 1, (micro_batch_size, longest_seq_length))
        forward_fn = lambda: meta_model(x)  # noqa: F821
        loss_fn = lambda y: chunked_cross_entropy(y, x, chunk_size=0)  # noqa: F821
        measured_flops = measure_flops(meta_model, forward_fn, loss_fn)
        rank_print(fabric, f"Measured TFLOPs: {measured_flops * fabric.world_size / 1e12:.2f}")
        del meta_model, x

    throughput = ThroughputMonitor(fabric, window_size=50)
    step_count = 0
    total_t0 = time.perf_counter()

    xm.mark_step()
    for iter_num in range(1, max_iters + 1):
        if step_count <= warmup_steps:
            # linear warmup
            lr = learning_rate * step_count / warmup_steps
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr

        iter_t0 = time.perf_counter()

        input_ids, targets = get_batch(fabric, train_data, longest_seq_length)

        # gradients are only synchronized on the iteration that runs optimizer.step()
        is_accumulating = iter_num % gradient_accumulation_iters != 0
        with fabric.no_backward_sync(model, enabled=is_accumulating):
            logits = model(input_ids, lm_head_chunk_size=128)
            xm.mark_step()
            # shift the targets such that output n predicts token n+1
            logits[-1] = logits[-1][..., :-1, :]
            loss = chunked_cross_entropy(logits, targets[..., 1:])
            fabric.backward(loss / gradient_accumulation_iters)
        xm.mark_step()

        if not is_accumulating:
            optimizer.step()
            optimizer.zero_grad()
            step_count += 1
        else:
            # still cut the lazy graph on accumulation-only iterations
            xm.mark_step()

        if iter_num % log_interval == 0:
            t1 = time.perf_counter()
            throughput.update(
                time=t1 - total_t0,
                batches=iter_num,
                samples=iter_num * micro_batch_size,
                lengths=iter_num * micro_batch_size * longest_seq_length,
                flops=measured_flops * log_interval,
            )
            throughput.compute_and_log(step=iter_num)
            rank_print(
                fabric,
                f"iter {iter_num} step {step_count}:"
                # uncomment to print the loss. this will considerably slow down the iteration times
                # + f" loss {loss.item():.4f},"
                + f" iter time: {(t1 - iter_t0) * 1000:.2f}ms"
                + (" (optimizer.step)" if not is_accumulating else ""),
            )

        if not is_accumulating and step_count % eval_interval == 0:
            t0 = time.perf_counter()
            val_loss = validate(fabric, model, val_data, tokenizer, longest_seq_length)
            t1 = time.perf_counter() - t0
            rank_print(fabric, f"step {iter_num}: val loss {val_loss.item():.4f}, val time: {t1 * 1000:.2f}ms")
            fabric.barrier()
        if not is_accumulating and step_count % save_interval == 0:
            checkpoint_path = out_dir / f"iter-{iter_num:06d}-ckpt.pth"
            save_adapter_checkpoint(fabric, model, checkpoint_path)
218
+
219
+
220
# xla does not support `inference_mode`: RuntimeError: Cannot set version_counter for inference tensor
@torch.no_grad()
def validate(
    fabric: L.Fabric, model: GPT, val_data: List[Dict], tokenizer: Tokenizer, longest_seq_length: int
) -> torch.Tensor:
    """Compute the mean loss over ``eval_iters`` random validation batches and print one sample generation.

    Returns:
        Scalar tensor holding the mean validation loss.
    """
    rank_print(fabric, "Validating ...")
    model.eval()
    losses = torch.zeros(eval_iters)
    xm.mark_step()
    for k in range(eval_iters):
        input_ids, targets = get_batch(fabric, val_data, longest_seq_length)
        logits = model(input_ids)
        xm.mark_step()
        # drop the last logit / first target so position n predicts token n+1
        losses[k] = chunked_cross_entropy(logits[..., :-1, :], targets[..., 1:], chunk_size=0)
    val_loss = losses.mean()

    # produce an example:
    instruction = "Recommend a movie for me to watch during the weekend and explain the reason."
    rank_print(fabric, instruction)
    sample = {"instruction": instruction, "input": ""}
    prompt = generate_prompt(sample)
    encoded = tokenizer.encode(prompt, device=fabric.device)
    with fabric.init_tensor():
        # do not set `max_seq_length=max_returned_token` because memory is not a concern here
        model.set_kv_cache(batch_size=1)
    output = generate(model, encoded, max_returned_tokens=len(encoded) + eval_max_new_tokens, temperature=0.8)
    model.clear_kv_cache()
    output = tokenizer.decode(output)
    rank_print(fabric, output)

    model.train()
    return val_loss
252
+
253
+
254
def get_batch(fabric: L.Fabric, data: List[Dict], longest_seq_length: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """Sample a random micro-batch and right-pad it to a fixed length.

    Padding every batch to ``longest_seq_length`` keeps tensor shapes constant
    across iterations, which avoids XLA recompilation.
    """
    sample_idx = torch.randint(len(data), (micro_batch_size,))

    def pad_to_fixed(seq: torch.Tensor, fill: int) -> torch.Tensor:
        # fixed target length -> constant shapes across batches (no recompilation)
        tail = torch.full((longest_seq_length - len(seq),), fill, dtype=seq.dtype)
        return torch.cat((seq, tail))

    inputs = torch.stack([pad_to_fixed(data[i]["input_ids"].type(torch.int64), 0) for i in sample_idx])
    # labels are padded with -1 (positions the loss should skip)
    targets = torch.stack([pad_to_fixed(data[i]["labels"].type(torch.int64), -1) for i in sample_idx])

    return fabric.to_device((inputs, targets))
270
+
271
+
272
def get_longest_seq_length(data: List[Dict]) -> int:
    """Return the length of the longest ``input_ids`` sequence in ``data``.

    Used to size ``max_seq_length`` as tightly as possible (saves memory).
    """
    lengths = (len(sample["input_ids"]) for sample in data)
    return max(lengths)
275
+
276
+
277
def save_adapter_checkpoint(fabric: L.Fabric, model: torch.nn.Module, file_path: Path) -> None:
    """Save only the adapter weights (selected by ``adapter_filter``) to ``file_path``."""
    rank_print(fabric, f"Saving adapter weights to {str(file_path)!r}")
    # the filter keeps checkpoints small: base-model weights are excluded
    fabric.save(file_path, {"model": model}, filter={"model": adapter_filter})
280
+
281
+
282
+ if __name__ == "__main__":
283
+ from jsonargparse import CLI
284
+
285
+ CLI(setup)
extensions/xla/generate/__init__ ADDED
File without changes
extensions/xla/generate/adapter.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
2
+
3
+ import sys
4
+ import time
5
+ from pathlib import Path
6
+ from typing import Optional
7
+
8
+ import lightning as L
9
+ from lightning.fabric.accelerators import XLAAccelerator
10
+ from lightning.fabric.strategies import XLAFSDPStrategy
11
+
12
+ from litgpt import Tokenizer
13
+ from litgpt.adapter import GPT, Block, Config
14
+ from litgpt.prompts import Alpaca
15
+ from litgpt.utils import check_valid_checkpoint_dir, lazy_load
16
+
17
+ # support running without installing as a package
18
+ wd = Path(__file__).parents[3].resolve()
19
+ sys.path.append(str(wd))
20
+
21
+ from xla.generate.base import generate # noqa: E402
22
+ from xla.utils import rank_print # noqa: E402
23
+
24
+
25
def setup(
    prompt: str = "What food do llamas eat?",
    *,
    input: str = "",
    sys_prompt: Optional[str] = None,
    adapter_path: Path = Path("out/adapter/alpaca/lit_model_adapter_finetuned.pth"),
    checkpoint_dir: Path = Path("checkpoints/tiiuae/falcon-7b"),
    max_new_tokens: int = 100,
    top_k: Optional[int] = 50,
    temperature: float = 0.8,
    precision: str = "bf16-true",
) -> None:
    """Generates a response based on a given instruction and an optional input.
    This script will only work with checkpoints from the instruction-tuned Adapter model.
    See `xla/finetune/adapter.py`.

    Args:
        prompt: The prompt/instruction (Alpaca style).
        input: Optional input (Alpaca style).
        sys_prompt: Optional system prompt.
        adapter_path: Path to the checkpoint with trained adapter weights, which are the output of
            `xla/finetune/adapter.py`.
        checkpoint_dir: The path to the checkpoint folder with pretrained model weights.
        max_new_tokens: The number of generation steps to take.
        top_k: The number of top most probable tokens to consider in the sampling process.
        temperature: A value controlling the randomness of the sampling process. Higher values result in more random
            samples.
        precision: Indicates the Fabric precision setting to use.
    """
    devices = XLAAccelerator.auto_device_count()
    # shard across devices only when more than one XLA device is available
    strategy = XLAFSDPStrategy(auto_wrap_policy={Block}) if devices > 1 else "auto"
    fabric = L.Fabric(devices=devices, precision=precision, strategy=strategy)
    fabric.launch(main, prompt, input, sys_prompt, adapter_path, checkpoint_dir, max_new_tokens, top_k, temperature)
58
+
59
+
60
def main(
    fabric: L.Fabric,
    prompt: str,
    input: str,
    sys_prompt: Optional[str],
    adapter_path: Path,
    checkpoint_dir: Path,
    max_new_tokens: int,
    top_k: Optional[int],
    temperature: float,
) -> None:
    """Load the base model plus adapter weights and generate a single response.

    Args:
        fabric: The launched Fabric instance (runs per process).
        prompt: The instruction (Alpaca style).
        input: Optional Alpaca-style input for the instruction.
        sys_prompt: Optional system prompt.
        adapter_path: Checkpoint with the finetuned adapter weights.
        checkpoint_dir: Directory with pretrained weights, config and tokenizer.
        max_new_tokens: Number of new tokens to generate.
        top_k: Restrict sampling to the k most probable tokens (None = no restriction).
        temperature: Sampling temperature.
    """
    check_valid_checkpoint_dir(checkpoint_dir)

    config = Config.from_file(checkpoint_dir / "model_config.yaml", adapter_start_layer=0)

    checkpoint_path = checkpoint_dir / "lit_model.pth"

    rank_print(fabric, f"Loading model {str(checkpoint_path)!r} with {config.__dict__}", file=sys.stderr)
    t0 = time.perf_counter()
    with fabric.init_module(empty_init=True):
        model = GPT(config)
    rank_print(fabric, f"Time to instantiate model: {time.perf_counter() - t0:.02f} seconds.", file=sys.stderr)

    t0 = time.perf_counter()
    checkpoint = lazy_load(checkpoint_path)
    adapter_checkpoint = lazy_load(adapter_path)
    # overlay the finetuned adapter weights on top of the pretrained state dict
    checkpoint.update(adapter_checkpoint.get("model", adapter_checkpoint))
    model.load_state_dict(checkpoint)
    rank_print(fabric, f"Time to load the model weights: {time.perf_counter() - t0:.02f} seconds.", file=sys.stderr)

    model.eval()
    model = fabric.setup_module(model)

    tokenizer = Tokenizer(checkpoint_dir)
    # TODO: Load prompt style from checkpoint and apply it here
    prompt_style = Alpaca()
    prompt = prompt_style.apply(prompt, sys_prompt=sys_prompt, input=input)
    encoded = tokenizer.encode(prompt, device=fabric.device)
    prompt_length = encoded.size(0)
    max_returned_tokens = prompt_length + max_new_tokens

    with fabric.init_tensor():
        # set the max_seq_length to limit the memory usage to what we need
        model.max_seq_length = max_returned_tokens
        # enable the kv cache
        model.set_kv_cache(batch_size=1)

    t0 = time.perf_counter()
    # fix: `generate` does not accept a `max_seq_length` keyword (passing it raised
    # TypeError); the limit is already applied via `model.max_seq_length` above
    y = generate(
        model,
        encoded,
        max_returned_tokens,
        temperature=temperature,
        top_k=top_k,
        eos_id=tokenizer.eos_id,
    )
    t = time.perf_counter() - t0

    output = tokenizer.decode(y)
    # show only the model's response when the Alpaca response marker is present
    output = output.split("### Response:")[1] if "### Response:" in output else output
    output = output.strip()
    fabric.print(output)

    tokens_generated = y.size(0) - prompt_length
    rank_print(
        fabric, f"\n\nTime for inference: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec", file=sys.stderr
    )
128
+
129
+
130
+ if __name__ == "__main__":
131
+ from jsonargparse import CLI
132
+
133
+ CLI(setup)
extensions/xla/generate/base.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
2
+
3
+ import sys
4
+ import time
5
+ from pathlib import Path
6
+ from typing import Optional
7
+
8
+ import lightning as L
9
+ import torch
10
+ import torch_xla.core.xla_model as xm
11
+ from lightning.fabric.accelerators import XLAAccelerator
12
+ from lightning.fabric.strategies import XLAFSDPStrategy
13
+
14
+ from litgpt import GPT, Config, Tokenizer
15
+ from litgpt.model import Block
16
+ from litgpt.utils import check_valid_checkpoint_dir, lazy_load
17
+
18
+ # support running without installing as a package
19
+ wd = Path(__file__).parents[3].resolve()
20
+ sys.path.append(str(wd))
21
+
22
+ from xla.utils import rank_print # noqa: E402
23
+
24
+
25
# xla does not support `inference_mode`: RuntimeError: Cannot set version_counter for inference tensor
@torch.no_grad()
def generate(
    model: GPT,
    idx: torch.Tensor,
    max_returned_tokens: int,
    *,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    eos_id: Optional[int] = None,
) -> torch.Tensor:
    """Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.

    The implementation of this function is modified from A. Karpathy's nanoGPT.

    Args:
        model: The model to use.
        idx: Tensor of shape (T) with indices of the prompt sequence.
        max_returned_tokens: The maximum number of tokens to return (given plus generated).
        temperature: Scales the predicted logits by 1 / temperature.
        top_k: If specified, only sample among the tokens with the k highest probabilities.
        eos_id: If specified, stop generating any more token once the <eos> token is triggered.
    """
    T = idx.size(0)
    assert max_returned_tokens > T
    if model.max_seq_length < max_returned_tokens - 1:
        # rolling the kv cache based on the `input_pos` value would be necessary. However, doing so would introduce a
        # data dependency on the `input_pos` tensor and impact model compilation. Since this setting is uncommon, we do
        # not support it to avoid negatively impacting the overall speed
        raise NotImplementedError(f"max_seq_length {model.max_seq_length} needs to be >= {max_returned_tokens - 1}")

    device, dtype = idx.device, idx.dtype
    # create an empty tensor of the expected final shape and fill in the current tokens
    empty = torch.empty(max_returned_tokens, dtype=dtype, device=device)
    empty[:T] = idx
    idx = empty
    # TODO: FSDP has an internal broadcasting issue, so we are forced to have this be of length 1 until it's fixed
    input_pos = torch.tensor([0], device=device)

    xm.mark_step()

    # generate up to a fixed number of tokens
    for _ in range(max_returned_tokens):
        # single-token forward: select the token at `input_pos` and add a batch dim
        x = idx.index_select(0, input_pos).view(1, -1)

        # forward
        logits = model(x, input_pos)
        logits = logits[0, -1] / temperature

        # optionally crop the logits to only the top k options
        if top_k is not None:
            v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
            logits = torch.where(logits < v[[-1]], -float("Inf"), logits)

        probs = torch.nn.functional.softmax(logits, dim=-1)
        idx_next = torch.multinomial(probs, num_samples=1).to(dtype=dtype)

        # advance
        input_pos = input_pos[-1:] + 1

        xm.mark_step()

        # concatenate the new generation
        # NOTE(review): with `input_pos` starting at [0], the first T-1 iterations
        # write sampled tokens over prompt positions 1..T-1 — confirm this is
        # intended (cf. the non-XLA generate, which consumes the full prompt first)
        idx = idx.index_copy(0, input_pos, idx_next)

        # if <eos> token is triggered, return the output (stop generation)
        if idx_next == eos_id:
            return idx[:input_pos]  # include the EOS token

    return idx
95
+
96
+
97
def setup(
    prompt: str = "What food do llamas eat?",
    *,
    num_samples: int = 1,
    max_new_tokens: int = 100,
    top_k: Optional[int] = 50,
    temperature: float = 0.8,
    checkpoint_dir: Path = Path("checkpoints/tiiuae/falcon-7b"),
    precision: str = "bf16-true",
) -> None:
    """Generates text samples based on a pre-trained model and tokenizer.

    Args:
        prompt: The prompt string to use for generating the samples.
        num_samples: The number of text samples to generate.
        max_new_tokens: The number of generation steps to take.
        top_k: The number of top most probable tokens to consider in the sampling process.
        temperature: A value controlling the randomness of the sampling process. Higher values result in more random
            samples.
        checkpoint_dir: The checkpoint directory to load.
        precision: Indicates the Fabric precision setting to use.
    """
    devices = XLAAccelerator.auto_device_count()
    # shard across devices only when more than one XLA device is available
    strategy = XLAFSDPStrategy(auto_wrap_policy={Block}) if devices > 1 else "auto"
    fabric = L.Fabric(devices=devices, precision=precision, strategy=strategy)
    fabric.launch(main, prompt, num_samples, max_new_tokens, top_k, temperature, checkpoint_dir)
123
+
124
+
125
def main(
    fabric: L.Fabric,
    prompt: str,
    num_samples: int,
    max_new_tokens: int,
    top_k: Optional[int],
    temperature: float,
    checkpoint_dir: Path,
) -> None:
    """Load a pretrained checkpoint and print ``num_samples`` completions of ``prompt``.

    Runs on every process after ``fabric.launch``.
    """
    check_valid_checkpoint_dir(checkpoint_dir)

    config = Config.from_file(checkpoint_dir / "model_config.yaml")

    checkpoint_path = checkpoint_dir / "lit_model.pth"

    rank_print(fabric, f"Loading model {str(checkpoint_path)!r} with {config.__dict__}", file=sys.stderr)
    t0 = time.perf_counter()
    with fabric.init_module(empty_init=True):
        model = GPT(config)
    rank_print(fabric, f"Time to instantiate model: {time.perf_counter() - t0:.02f} seconds.", file=sys.stderr)

    t0 = time.perf_counter()
    checkpoint = lazy_load(checkpoint_path)
    # accept both raw state dicts and {"model": state_dict}-wrapped checkpoints
    model.load_state_dict(checkpoint.get("model", checkpoint))
    rank_print(fabric, f"Time to load the model weights: {time.perf_counter() - t0:.02f} seconds.", file=sys.stderr)

    model.eval()
    model = fabric.setup_module(model)

    tokenizer = Tokenizer(checkpoint_dir)
    encoded = tokenizer.encode(prompt, device=fabric.device)
    prompt_length = encoded.size(0)
    max_returned_tokens = prompt_length + max_new_tokens

    with fabric.init_tensor():
        # set the max_seq_length to limit the memory usage to what we need
        model.max_seq_length = max_returned_tokens

    L.seed_everything(1234)
    for i in range(num_samples):
        with fabric.init_tensor():
            # enable the kv cache
            model.set_kv_cache(batch_size=1)

        t0 = time.perf_counter()
        y = generate(model, encoded, max_returned_tokens, temperature=temperature, top_k=top_k)
        t = time.perf_counter() - t0

        fabric.print(tokenizer.decode(y))
        tokens_generated = y.size(0) - prompt_length
        rank_print(
            fabric,
            f"Time for inference {i + 1}: {t:.02f} sec total, {tokens_generated / t:.02f} tokens/sec",
            file=sys.stderr,
        )
180
+
181
+
182
+ if __name__ == "__main__":
183
+ from jsonargparse import CLI
184
+
185
+ CLI(setup)
extensions/xla/scripts/__init__ ADDED
File without changes
extensions/xla/scripts/prepare_alpaca.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
2
+
3
+ """Implementation derived from https://github.com/tloen/alpaca-lora"""
4
+
5
+ import json
6
+ from pathlib import Path
7
+ from typing import Optional
8
+
9
+ import torch
10
+ import yaml
11
+ from lightning_utilities.core.imports import RequirementCache
12
+ from torch.utils.data import random_split
13
+ from tqdm import tqdm
14
+
15
+ from litgpt.tokenizer import Tokenizer
16
+ from litgpt.utils import CLI
17
+
18
+
19
def prepare(
    destination_path: Path = Path("data/alpaca"),
    checkpoint_dir: Path = Path("checkpoints/stabilityai/stablelm-base-alpha-3b"),
    val_split_fraction: float = 0.03865,  # to get exactly 2000 validation samples,
    seed: int = 42,
    mask_inputs: bool = False,  # as in alpaca-lora
    data_file_name: str = "alpaca_data_cleaned_archive.json",
    data_file_url: str = "https://raw.githubusercontent.com/tloen/alpaca-lora/main/alpaca_data_cleaned_archive.json",
    ignore_index: int = -100,
    max_seq_length: Optional[int] = None,
) -> None:
    """Prepare the Alpaca dataset for instruction tuning.

    The output is a training and test dataset saved as `train.pt` and `test.pt`,
    which stores the preprocessed and tokenized prompts and labels.

    Args:
        destination_path: Where the raw json and the processed splits are written.
        checkpoint_dir: Checkpoint directory providing tokenizer and model config.
        val_split_fraction: Fraction of samples held out for the test split.
        seed: Seed for the deterministic train/test split.
        mask_inputs: If True, prompt tokens in the labels are masked with `ignore_index`.
        data_file_name: File name for the raw json dataset.
        data_file_url: Download URL for the raw json dataset.
        ignore_index: Label value ignored by the loss.
        max_seq_length: Truncation length; defaults to the model's `block_size`.
    """
    if max_seq_length is None:
        # default to the model's context length
        with open(checkpoint_dir / "model_config.yaml", encoding="utf-8") as file:
            config = yaml.safe_load(file)
            max_seq_length = config["block_size"]

    destination_path.mkdir(parents=True, exist_ok=True)
    data_file_path = destination_path / data_file_name
    print("Loading data file...")
    download_if_missing(data_file_path, data_file_url)
    with open(data_file_path, encoding="utf-8") as file:
        data = json.load(file)

    print("Loading tokenizer...")
    tokenizer = Tokenizer(checkpoint_dir)

    # Partition the dataset into train and test
    train_set, test_set = random_split(
        data, [1.0 - val_split_fraction, val_split_fraction], generator=torch.Generator().manual_seed(seed)
    )
    train_set, test_set = list(train_set), list(test_set)

    print(f"train has {len(train_set):,} samples")
    print(f"test has {len(test_set):,} samples")

    # Tokenize and save both splits with identical settings (deduplicates the
    # previously copy-pasted train/test processing code).
    for split_name, split in (("train", train_set), ("test", test_set)):
        print(f"Processing {split_name} split ...")
        processed = [
            prepare_sample(
                example=sample,
                tokenizer=tokenizer,
                max_length=max_seq_length,
                mask_inputs=mask_inputs,
                ignore_index=ignore_index,
            )
            for sample in tqdm(split)
        ]
        torch.save(processed, destination_path / f"{split_name}.pt")
84
+
85
+
86
def download_if_missing(file_path: Path, file_url: str) -> None:
    """Downloads the raw json data file and saves it in the given destination.

    Skips the download when the file already exists and is non-empty.

    Raises:
        ModuleNotFoundError: if the `requests` package is not installed.
        requests.HTTPError: if the server responds with an error status.
    """
    if file_path.exists() and file_path.stat().st_size > 0:
        return
    requests_available = RequirementCache("requests")
    if not requests_available:
        raise ModuleNotFoundError(str(requests_available))
    import requests

    response = requests.get(file_url, timeout=60)
    # fail loudly instead of silently saving an HTML error page as the dataset
    response.raise_for_status()
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(response.text)
97
+
98
+
99
def prepare_sample(example: dict, tokenizer: Tokenizer, max_length: int, mask_inputs: bool, ignore_index: int) -> dict:
    """Tokenize one Alpaca sample into ``input_ids`` and ``labels``.

    Each sample in the dataset consists of:
    - instruction: A string describing the task
    - input: A string holding a special input value for the instruction.
      This only applies to some samples, and in others this is empty.
    - output: The response string

    The prompt (instruction plus optional input) is concatenated with the
    response and tokenized with an EOS token. The labels are a copy of the
    token ids; when ``mask_inputs`` is set, the tokens belonging to the prompt
    are replaced with ``ignore_index`` so the loss only covers the response.

    Args:
        example: Sample with "instruction", "input" and "output" fields.
        tokenizer: Tokenizer used for encoding.
        max_length: Maximum number of tokens kept per sequence.
        mask_inputs: Whether to mask the prompt portion of the labels.
        ignore_index: Label value the loss function ignores.

    Returns:
        The original sample fields plus "input_ids" and "labels".
    """
    prompt = generate_prompt(example)
    prompt_ids = tokenizer.encode(prompt, max_length=max_length)
    full_ids = tokenizer.encode(prompt + example["output"], eos=True, max_length=max_length)

    labels = full_ids.clone()
    if mask_inputs:
        # hide the prompt tokens from the loss
        labels[: len(prompt_ids)] = ignore_index

    return {**example, "input_ids": full_ids, "labels": labels}
127
+
128
+
129
def generate_prompt(example: dict) -> str:
    """Build the standardized Alpaca-style message for one sample.

    The message contains the instruction, the optional input (when non-empty)
    and a trailing '### Response:' header for the model to complete.
    """
    instruction = example["instruction"]
    context = example["input"]
    if context:
        header = (
            "Below is an instruction that describes a task, paired with an input that provides further context. "
            "Write a response that appropriately completes the request.\n\n"
        )
        return header + f"### Instruction:\n{instruction}\n\n### Input:\n{context}\n\n### Response:"
    header = (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
    )
    return header + f"### Instruction:\n{instruction}\n\n### Response:"
144
+
145
+
146
+ if __name__ == "__main__":
147
+ CLI(prepare)
out/eval/openllama_arc_arxiv_mc/arxiv_mc_heatmap.png ADDED

Git LFS Details

  • SHA256: 2de65942bc32f58aedbed8f9e20993a37b11d9330dff2695e9d47d58399ccab4
  • Pointer size: 131 Bytes
  • Size of remote file: 219 kB
out/eval/openllama_arc_arxiv_mc/arxiv_mc_heatmap_acc.png ADDED

Git LFS Details

  • SHA256: db7c751c52e73ca596438243e4c395fac3c96c0123408b747b666a7eda4c8220
  • Pointer size: 131 Bytes
  • Size of remote file: 202 kB
out/eval/openllama_arxiv_mc/arxiv_mc_heatmap.png ADDED

Git LFS Details

  • SHA256: 23925375d0b0b32e9f708bf5d93c7e5a3de18bbdda368720f23e074edeab3eb4
  • Pointer size: 131 Bytes
  • Size of remote file: 227 kB
out/eval/openllama_arxiv_mc/arxiv_mc_heatmap_acc.png ADDED

Git LFS Details

  • SHA256: e28c269fe12f621d746f3303d3d77fa6f4e9ac6725cae4a0f7be2ecc6bb4d4d3
  • Pointer size: 131 Bytes
  • Size of remote file: 205 kB
out/eval/openllama_benches/monthly_metrics.png ADDED

Git LFS Details

  • SHA256: ec384966933aee65e86b326799fefc2a97fb9e1a44d5aca81c3b6f27415915ae
  • Pointer size: 131 Bytes
  • Size of remote file: 132 kB
out/eval/openllama_ppl/val_ppl_heatmap.png ADDED

Git LFS Details

  • SHA256: 8d022d958def282c91d1104a2e1512f601c4cd0861b1cff95564ed9cae9f88f2
  • Pointer size: 131 Bytes
  • Size of remote file: 464 kB
out/eval/qwen2_7b_question_focus/acc_heatmap.png ADDED

Git LFS Details

  • SHA256: 89f187ee55f570a6e54fa976b9b5a61c24149c73c515cdd257d19c12c3f1c72d
  • Pointer size: 131 Bytes
  • Size of remote file: 216 kB
out/eval/qwen2_7b_question_focus/acc_norm_heatmap.png ADDED

Git LFS Details

  • SHA256: c4c7d88029759cd31676593ab84b1dcf22e2a55b74eb75c28c0ac94705a51662
  • Pointer size: 131 Bytes
  • Size of remote file: 219 kB
out/eval/qwen2_7b_question_focus_lr_plus/acc_heatmap.png ADDED

Git LFS Details

  • SHA256: e28f1a383c23a5adb64ea30ef5a444096f64d358d684ca085c9829e9561aca84
  • Pointer size: 131 Bytes
  • Size of remote file: 213 kB
out/eval/qwen2_7b_question_focus_lr_plus/acc_norm_heatmap.png ADDED

Git LFS Details

  • SHA256: 2bfcc1e1a481e2605a9616daf13e777f1f7d814f42dec4c489440124e8d52fc7
  • Pointer size: 131 Bytes
  • Size of remote file: 217 kB
out/eval/qwen2_7b_question_focus_lr_plus/heatmap.png ADDED

Git LFS Details

  • SHA256: 8bd2dd3fa29a19813e1321da8115b8861eb7c6b0d6f6c4c27478894321dfa7e6
  • Pointer size: 131 Bytes
  • Size of remote file: 116 kB
out/eval/qwen2_7b_question_focus_lr_plus/positive_stability_curve.png ADDED

Git LFS Details

  • SHA256: 84396ffe9203950f3f8b3e4bb80f2c8ef7c10c887e61348ed5c14d1835abb90f
  • Pointer size: 131 Bytes
  • Size of remote file: 177 kB
out/eval/qwen2_7b_question_focus_lr_plus/stability_curve.png ADDED

Git LFS Details

  • SHA256: fc66d354ae7546c49729f8f499f55a4f8ee4f349b45c337015ca927a95f202a4
  • Pointer size: 131 Bytes
  • Size of remote file: 136 kB
out/eval/qwen2_7b_question_focus_lr_plus/summary_heatmap.png ADDED

Git LFS Details

  • SHA256: a48530c2770248f3b63c07b7f2f6c1d9fd7fc55f98498d9943ed128cc9e00597
  • Pointer size: 131 Bytes
  • Size of remote file: 214 kB
out/eval/qwen2_arxiv_mc/arxiv_mc_heatmap.png ADDED

Git LFS Details

  • SHA256: ad52c294277bb0576cf4eae7525a02f432e5a66a25b9a4d863c94e8f2724dadd
  • Pointer size: 131 Bytes
  • Size of remote file: 213 kB
out/eval/qwen2_arxiv_mc/arxiv_mc_heatmap_acc.png ADDED

Git LFS Details

  • SHA256: 67653d1836b8b32067b50efc9cf8f4b1c862cb5a15d75f0ea31cdb6a054ed3db
  • Pointer size: 131 Bytes
  • Size of remote file: 213 kB
out/eval/qwen2_ppl/val_ppl_heatmap.png ADDED

Git LFS Details

  • SHA256: 7531763b2489360f16a0325f828710dbaa6664c9b9dd698f8a896e36c462bfbe
  • Pointer size: 131 Bytes
  • Size of remote file: 442 kB
out/eval/tinyllama_3_epoch_arxiv_mc/arxiv_mc_heatmap.png ADDED

Git LFS Details

  • SHA256: 6f8eb91862296d8d5d02f5948eeabe40ab1ad127f786ed42d16f5f5f41914952
  • Pointer size: 131 Bytes
  • Size of remote file: 218 kB
out/eval/tinyllama_3_epoch_arxiv_mc/arxiv_mc_heatmap_acc.png ADDED

Git LFS Details

  • SHA256: c637eb3e6132be3b78fc533bcf27ae8db34788a74ba96ad903bccfd8314fa4ef
  • Pointer size: 131 Bytes
  • Size of remote file: 200 kB
out/eval/tinyllama_arxiv_mc/arxiv_mc_heatmap.png ADDED

Git LFS Details

  • SHA256: 37b1d2db5c139070dbac13b5fb095b7eb17ef96fe463340fb876058d76b0a846
  • Pointer size: 131 Bytes
  • Size of remote file: 214 kB
out/eval/tinyllama_arxiv_mc/arxiv_mc_heatmap_acc.png ADDED

Git LFS Details

  • SHA256: c1213ab4a5b86d063ea58a0d1cde2ff311bc133209fa3312c001af6699c66906
  • Pointer size: 131 Bytes
  • Size of remote file: 208 kB
out/eval/tinyllama_benches/2407_full/results.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5730d224018003dc11b022be88294d8826441e2f520627b68ea575a253d78f2
3
+ size 141666967
out/eval/tinyllama_benches/2407_full/tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
out/eval/tinyllama_benches/2408_full/results.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af531a9213c59c2c565778508b581b66671c8cf39b77fdaab7d0360dab23364e
3
+ size 141669063
out/eval/tinyllama_benches/monthly_metrics.png ADDED

Git LFS Details

  • SHA256: 0881b08352698a919bdbe577450a9ebbd7b08b805fea7b98de7f49ba3a6d9ba7
  • Pointer size: 131 Bytes
  • Size of remote file: 165 kB