EdwardSJ151 commited on
Commit
3af75d1
·
verified ·
1 Parent(s): f209d99

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +35 -0
  2. synthgen/PtPersonaIFGen/.gitignore +6 -0
  3. synthgen/PtPersonaIFGen/README.md +0 -0
  4. synthgen/PtPersonaIFGen/__init__.py +1 -0
  5. synthgen/PtPersonaIFGen/__pycache__/__init__.cpython-312.pyc +0 -0
  6. synthgen/PtPersonaIFGen/ft_unsloth_gpu1_big_qwen.log +2 -0
  7. synthgen/PtPersonaIFGen/lmeval_pt/__init__.py +2 -0
  8. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/__init__.cpython-311.pyc +0 -0
  9. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/__init__.cpython-312.pyc +0 -0
  10. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions.cpython-311.pyc +3 -0
  11. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions.cpython-312.pyc +3 -0
  12. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions_registry.cpython-311.pyc +0 -0
  13. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions_registry.cpython-312.pyc +0 -0
  14. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions_util.cpython-311.pyc +0 -0
  15. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/utils.cpython-311.pyc +0 -0
  16. synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/utils.cpython-312.pyc +0 -0
  17. synthgen/PtPersonaIFGen/lmeval_pt/ifeval.yaml +29 -0
  18. synthgen/PtPersonaIFGen/lmeval_pt/instructions.py +2413 -0
  19. synthgen/PtPersonaIFGen/lmeval_pt/instructions_registry.py +273 -0
  20. synthgen/PtPersonaIFGen/lmeval_pt/instructions_util.py +1701 -0
  21. synthgen/PtPersonaIFGen/lmeval_pt/utils.py +134 -0
  22. synthgen/PtPersonaIFGen/main_ifeval_code/__init__.py +2 -0
  23. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/__init__.cpython-311.pyc +0 -0
  24. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/config.cpython-311.pyc +0 -0
  25. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/filter_ifeval_data_pt.cpython-311.pyc +0 -0
  26. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/json_schemas_pt.cpython-311.pyc +0 -0
  27. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/phase1_magpie.cpython-311.pyc +0 -0
  28. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/phase2_detection.cpython-311.pyc +0 -0
  29. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/phase3_extraction.cpython-311.pyc +0 -0
  30. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/phase4_postprocess.cpython-311.pyc +0 -0
  31. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/run_pipeline.cpython-311.pyc +0 -0
  32. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/system_prompts_pt.cpython-311.pyc +0 -0
  33. synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/utils.cpython-311.pyc +0 -0
  34. synthgen/PtPersonaIFGen/main_ifeval_code/config.py +99 -0
  35. synthgen/PtPersonaIFGen/main_ifeval_code/filter_ifeval_data_pt.py +499 -0
  36. synthgen/PtPersonaIFGen/main_ifeval_code/json_schemas_pt.py +303 -0
  37. synthgen/PtPersonaIFGen/main_ifeval_code/phase1_magpie.py +537 -0
  38. synthgen/PtPersonaIFGen/main_ifeval_code/phase2_detection.py +214 -0
  39. synthgen/PtPersonaIFGen/main_ifeval_code/phase3_extraction.py +225 -0
  40. synthgen/PtPersonaIFGen/main_ifeval_code/phase4_postprocess.py +277 -0
  41. synthgen/PtPersonaIFGen/main_ifeval_code/run_pipeline.py +244 -0
  42. synthgen/PtPersonaIFGen/main_ifeval_code/system_prompts_pt.py +677 -0
  43. synthgen/PtPersonaIFGen/main_ifeval_code/utils.py +95 -0
  44. synthgen/PtPersonaIFGen/phase1_generated_output.jsonl +3 -0
  45. synthgen/PtPersonaIFGen/phase2_detection_output.jsonl +3 -0
  46. synthgen/PtPersonaIFGen/phase3_extraction_output.jsonl +3 -0
  47. synthgen/PtPersonaIFGen/phase4_final_output.jsonl +3 -0
  48. synthgen/PtPersonaIFGen/run.log +3 -0
  49. synthgen/PtPersonaIFGen/run_old.log +97 -0
  50. synthgen/__pycache__/evaluate.cpython-311.pyc +0 -0
.gitattributes CHANGED
@@ -58,3 +58,38 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
61
+ synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
62
+ synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
63
+ synthgen/PtPersonaIFGen/phase1_generated_output.jsonl filter=lfs diff=lfs merge=lfs -text
64
+ synthgen/PtPersonaIFGen/phase2_detection_output.jsonl filter=lfs diff=lfs merge=lfs -text
65
+ synthgen/PtPersonaIFGen/phase3_extraction_output.jsonl filter=lfs diff=lfs merge=lfs -text
66
+ synthgen/PtPersonaIFGen/run.log filter=lfs diff=lfs merge=lfs -text
67
+ synthgen/cemig_v0/cemig_cleaned/cemig_rewrite_focus.jsonl filter=lfs diff=lfs merge=lfs -text
68
+ synthgen/cemig_v0/cemig_cleaned/cemig_rewrite_simple.jsonl filter=lfs diff=lfs merge=lfs -text
69
+ synthgen/cemig_v0/cemig_cleaned/cemig_summary.jsonl filter=lfs diff=lfs merge=lfs -text
70
+ synthgen/cemig_v0/cemig_cleaned/magpie_conversations_cemig_v1_objetiva.jsonl filter=lfs diff=lfs merge=lfs -text
71
+ synthgen/cemig_v0/cemig_filtered/magpie_conversations_cemig_v1_objetiva.jsonl filter=lfs diff=lfs merge=lfs -text
72
+ synthgen/cemig_v0/first_versions/cemig_clean_Resumo.jsonl filter=lfs diff=lfs merge=lfs -text
73
+ synthgen/cemig_v0/first_versions/cemig_clean_SingleTurnConvo.jsonl filter=lfs diff=lfs merge=lfs -text
74
+ synthgen/cemig_v0/first_versions/cemig_rewrite_focus.jsonl filter=lfs diff=lfs merge=lfs -text
75
+ synthgen/cemig_v0/first_versions/cemig_rewrite_simple.jsonl filter=lfs diff=lfs merge=lfs -text
76
+ synthgen/cemig_v0/first_versions/cemig_summary.jsonl filter=lfs diff=lfs merge=lfs -text
77
+ synthgen/cemig_v0/first_versions/cemig_summary_evaled.jsonl filter=lfs diff=lfs merge=lfs -text
78
+ synthgen/cemig_v0/magpie_conversations_cemig.jsonl filter=lfs diff=lfs merge=lfs -text
79
+ synthgen/cemig_v0/magpie_conversations_cemig_cleaned.jsonl filter=lfs diff=lfs merge=lfs -text
80
+ synthgen/cemig_v0/magpie_conversations_cemig_v0.jsonl filter=lfs diff=lfs merge=lfs -text
81
+ synthgen/cemig_v0/magpie_conversations_cemig_v1_objetiva.jsonl filter=lfs diff=lfs merge=lfs -text
82
+ synthgen/complete/cemig_rewrite_focus.jsonl filter=lfs diff=lfs merge=lfs -text
83
+ synthgen/complete/cemig_rewrite_simple.jsonl filter=lfs diff=lfs merge=lfs -text
84
+ synthgen/complete/cemig_summary.jsonl filter=lfs diff=lfs merge=lfs -text
85
+ synthgen/complete/magpie_conversations_cemig_v1_objetiva.jsonl filter=lfs diff=lfs merge=lfs -text
86
+ synthgen/ifeval_v0/vllm_pipeline_en/phase1_generated_output_v0.jsonl filter=lfs diff=lfs merge=lfs -text
87
+ synthgen/magpie_conversations_cemig_v2_objetiva.jsonl filter=lfs diff=lfs merge=lfs -text
88
+ synthgen/magpie_conversations_cemig_v2_preproc_objetiva.jsonl filter=lfs diff=lfs merge=lfs -text
89
+ synthgen/vllm_pipeline/runs/run03/phase1_generated_output.jsonl filter=lfs diff=lfs merge=lfs -text
90
+ synthgen/vllm_pipeline/runs/run03/phase2_detection_output.jsonl filter=lfs diff=lfs merge=lfs -text
91
+ synthgen/vllm_pipeline/runs/run03/phase3_extraction_output.jsonl filter=lfs diff=lfs merge=lfs -text
92
+ synthgen/vllm_pipeline/runs/run05/phase1_generated_output.jsonl filter=lfs diff=lfs merge=lfs -text
93
+ synthgen/vllm_pipeline/runs/run05/phase2_detection_output.jsonl filter=lfs diff=lfs merge=lfs -text
94
+ synthgen/vllm_pipeline/runs/run05/phase3_extraction_output.jsonl filter=lfs diff=lfs merge=lfs -text
95
+ synthgen/vllm_pipeline/runs/run05/phase4_final_output.jsonl filter=lfs diff=lfs merge=lfs -text
synthgen/PtPersonaIFGen/.gitignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ .env
2
+ previous_things/
3
+ __pycache__/
4
+ *.jsonl
5
+ *.json
6
+ *.txt
synthgen/PtPersonaIFGen/README.md ADDED
File without changes
synthgen/PtPersonaIFGen/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # IFEval Persona Pipeline
synthgen/PtPersonaIFGen/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (147 Bytes). View file
 
synthgen/PtPersonaIFGen/ft_unsloth_gpu1_big_qwen.log ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ nohup: ignoring input
2
+ python: can't open file '/workspace/synthgen/PtPersonaIFGen/ft_unsloth_v5.py': [Errno 2] No such file or directory
synthgen/PtPersonaIFGen/lmeval_pt/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Local copy of lm_eval IFEval code (decoupled from lm_eval package)
2
+ # This allows adding custom instruction types without modifying the external package.
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (165 Bytes). View file
 
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (154 Bytes). View file
 
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90898b12df907b4637c78814b07a1ff3554366412e3259acbf1691c63c35c7ee
3
+ size 120010
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions.cpython-312.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a8999ea073008826df06d37bdba8f7586b24b29a1d3337d44b294e1cb7855b4
3
+ size 112175
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions_registry.cpython-311.pyc ADDED
Binary file (10.4 kB). View file
 
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions_registry.cpython-312.pyc ADDED
Binary file (5.55 kB). View file
 
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/instructions_util.cpython-311.pyc ADDED
Binary file (19.9 kB). View file
 
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/utils.cpython-311.pyc ADDED
Binary file (6.63 kB). View file
 
synthgen/PtPersonaIFGen/lmeval_pt/__pycache__/utils.cpython-312.pyc ADDED
Binary file (5.65 kB). View file
 
synthgen/PtPersonaIFGen/lmeval_pt/ifeval.yaml ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ task: ifeval
2
+ dataset_path: google/IFEval
3
+ dataset_name: null
4
+ output_type: generate_until
5
+ test_split: train
6
+ num_fewshot: 0
7
+ doc_to_text: prompt
8
+ doc_to_target: 0
9
+ generation_kwargs:
10
+ until: []
11
+ do_sample: false
12
+ temperature: 0.0
13
+ max_gen_toks: 1280
14
+ process_results: !function utils.process_results
15
+ metric_list:
16
+ - metric: prompt_level_strict_acc
17
+ aggregation: mean
18
+ higher_is_better: true
19
+ - metric: inst_level_strict_acc
20
+ aggregation: !function utils.agg_inst_level_acc
21
+ higher_is_better: true
22
+ - metric: prompt_level_loose_acc
23
+ aggregation: mean
24
+ higher_is_better: true
25
+ - metric: inst_level_loose_acc
26
+ aggregation: !function utils.agg_inst_level_acc
27
+ higher_is_better: true
28
+ metadata:
29
+ version: 4.0
synthgen/PtPersonaIFGen/lmeval_pt/instructions.py ADDED
@@ -0,0 +1,2413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The Google Research Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Library of instructions."""
16
+
17
+ import collections
18
+ import json
19
+ import logging
20
+ import random
21
+ import re
22
+ import string
23
+ from typing import Dict, Optional, Sequence, Union
24
+
25
+ import langdetect
26
+
27
+ from lmeval_pt import instructions_util
28
+
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+ _InstructionArgsDtype = Optional[Dict[str, Union[int, str, Sequence[str]]]]
33
+
34
+ _LANGUAGES = instructions_util.LANGUAGE_CODES
35
+
36
+ # The relational operation for comparison.
37
+ _COMPARISON_RELATION = ("less than", "at least")
38
+
39
+ # The maximum number of sentences.
40
+ _MAX_NUM_SENTENCES = 20
41
+
42
+ # The number of placeholders.
43
+ _NUM_PLACEHOLDERS = 4
44
+
45
+ # The number of bullet lists.
46
+ _NUM_BULLETS = 5
47
+
48
+ # The options of constrained response.
49
+ _CONSTRAINED_RESPONSE_OPTIONS = (
50
+ "My answer is yes.",
51
+ "My answer is no.",
52
+ "My answer is maybe.",
53
+ )
54
+
55
+ # The options of starter keywords.
56
+ _STARTER_OPTIONS = (
57
+ "I would say",
58
+ "My answer is",
59
+ "I believe",
60
+ "In my opinion",
61
+ "I think",
62
+ "I reckon",
63
+ "I feel",
64
+ "From my perspective",
65
+ "As I see it",
66
+ "According to me",
67
+ "As far as I'm concerned",
68
+ "To my understanding",
69
+ "In my view",
70
+ "My take on it is",
71
+ "As per my perception",
72
+ )
73
+
74
+ # The options of ending keywords.
75
+ # TODO(jeffreyzhou) add more ending options
76
+ _ENDING_OPTIONS = ("Any other questions?", "Is there anything else I can help with?")
77
+
78
+ # The number of highlighted sections.
79
+ _NUM_HIGHLIGHTED_SECTIONS = 4
80
+
81
+ # The section splitter.
82
+ _SECTION_SPLITER = ("Section", "SECTION")
83
+
84
+ # The number of sections.
85
+ _NUM_SECTIONS = 5
86
+
87
+ # The number of paragraphs.
88
+ _NUM_PARAGRAPHS = 5
89
+
90
+ # The postscript marker.
91
+ _POSTSCRIPT_MARKER = ("P.S.", "P.P.S")
92
+
93
+ # The number of keywords.
94
+ _NUM_KEYWORDS = 2
95
+
96
+ # The occurrences of a single keyword.
97
+ _KEYWORD_FREQUENCY = 3
98
+
99
+ # The occurrences of a single letter.
100
+ _LETTER_FREQUENCY = 10
101
+
102
+ # The occurrences of words with all capital letters.
103
+ _ALL_CAPITAL_WORD_FREQUENCY = 20
104
+
105
+ # The number of words in the response.
106
+ _NUM_WORDS_LOWER_LIMIT = 100
107
+ _NUM_WORDS_UPPER_LIMIT = 500
108
+
109
+
110
class Instruction:
    """Base template for a single verifiable instruction.

    Subclasses implement the four hook methods below; this base only stores
    the instruction identifier and raises for every unimplemented hook.
    """

    def __init__(self, instruction_id):
        # Identifier for this instruction instance.
        self.id = instruction_id

    def build_description(self, **kwargs):
        """Render the instruction description text (subclass hook)."""
        raise NotImplementedError("`build_description` not implemented.")

    def get_instruction_args(self):
        """Return the keyword args used by `build_description` (subclass hook)."""
        raise NotImplementedError("`get_instruction_args` not implemented.")

    def get_instruction_args_keys(self):
        """Return the arg names accepted by `build_description` (subclass hook)."""
        raise NotImplementedError("`get_instruction_args_keys` not implemented.")

    def check_following(self, value):
        """Return whether `value` satisfies the instruction (subclass hook)."""
        raise NotImplementedError("`check_following` not implemented.")
127
+
128
+
129
class ResponseLanguageChecker(Instruction):
    """Check the language of the entire response."""

    def build_description(self, *, language=None):
        """Build the instruction description.

        Args:
          language: A string representing the expected language of the response.
            The language has to comply to the 97 types defined in
            `langid.py` (https://pypi.org/project/langid/1.1.5/), which follows
            ISO 639-1 codes (https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes);
            for example, `en` for English, `zh` for Chinese, `fr` for French.
            When None, a code is sampled at random from the module's
            `_LANGUAGES` mapping.

        Returns:
          A string representing the instruction description.
        """
        self._language = language
        if self._language is None:
            self._language = random.choice(list(_LANGUAGES.keys()))
        # TODO(tianjianlu): opens the description generation to more choices.
        self._description_pattern = (
            "Your ENTIRE response should be in {language} language, no other "
            + "language is allowed."
        )
        return self._description_pattern.format(language=_LANGUAGES[self._language])

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"language": self._language}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["language"]

    def check_following(self, value):
        """Check if the language of the entire response follows the instruction.

        Args:
          value: A string representing the response.

        Returns:
          True if the language of `value` follows instruction; otherwise False.
          Detection failures count as followed rather than penalizing the
          response.
        """
        assert isinstance(value, str)

        try:
            return langdetect.detect(value) == self._language
        except langdetect.LangDetectException as e:
            # Count as instruction is followed.
            # Fix: log through the module-level `logger` (defined at the top
            # of this file) instead of the root `logging` module, so the
            # record carries this module's name and honors its configuration.
            logger.error(
                "Unable to detect language for text %s due to %s", value, e
            )  # refex: disable=pytotw.037
            return True
182
+
183
+
184
class NumberOfSentences(Instruction):
    """Check the number of sentences."""

    def build_description(self, *, num_sentences=None, relation=None):
        """Build the instruction description.

        Args:
          num_sentences: An integer specifying the number of sentences as a
            threshold. A random threshold in [1, _MAX_NUM_SENTENCES] is drawn
            when None or negative.
          relation: A string in (`less than`, `at least`), defining the relational
            operator for comparison. Chosen at random when None.
            Two relational comparisons are supported for now:
            if 'less than', the actual number of sentences < the threshold;
            if 'at least', the actual number of sentences >= the threshold.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `relation` is not one of `_COMPARISON_RELATION`.
        """
        # The number of sentences as a threshold for comparison.
        self._num_sentences_threshold = num_sentences
        if self._num_sentences_threshold is None or self._num_sentences_threshold < 0:
            self._num_sentences_threshold = random.randint(1, _MAX_NUM_SENTENCES)

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )
        else:
            self._comparison_relation = relation

        self._description_pattern = (
            "Your response should contain {relation} {num_sentences} sentences."
        )
        return self._description_pattern.format(
            relation=self._comparison_relation,
            num_sentences=self._num_sentences_threshold,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_sentences": self._num_sentences_threshold,
            "relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_sentences", "relation"]

    def check_following(self, value):
        """Check if the number of sentences follows the instruction.

        Args:
          value: A string representing the response.

        Returns:
          True if the response follows the instruction.

        Raises:
          ValueError: If `self._comparison_relation` is not a supported
            relation.
        """
        num_sentences = instructions_util.count_sentences(value)
        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return num_sentences < self._num_sentences_threshold
        if self._comparison_relation == _COMPARISON_RELATION[1]:
            return num_sentences >= self._num_sentences_threshold
        # Fix: the original fell through and implicitly returned None here,
        # contradicting the documented ValueError. Raise explicitly instead.
        raise ValueError(
            "The supported relation for comparison must be in "
            f"{_COMPARISON_RELATION}, but {self._comparison_relation} is given."
        )
254
+
255
+
256
class PlaceholderChecker(Instruction):
    """Check the placeholders in template writing."""

    def build_description(self, *, num_placeholders=None):
        """Build the instruction description.

        Args:
          num_placeholders: Minimum number of square-bracket placeholders the
            response must contain. A random count in [1, _NUM_PLACEHOLDERS]
            is drawn when None or negative.

        Returns:
          A string representing the instruction description.
        """
        if num_placeholders is None or num_placeholders < 0:
            num_placeholders = random.randint(1, _NUM_PLACEHOLDERS)
        self._num_placeholders = num_placeholders
        self._description_pattern = (
            "The response must contain at least {num_placeholders} placeholders "
            "represented by square brackets, such as [address]."
        )
        return self._description_pattern.format(
            num_placeholders=self._num_placeholders
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_placeholders": self._num_placeholders}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_placeholders"]

    def check_following(self, value):
        """Check if the number of placeholders follows the instruction.

        Args:
          value: A string representing the response.

        Returns:
          True if the response contains at least `num_placeholders`
          [bracketed] placeholders; otherwise False.
        """
        matches = re.findall(r"\[.*?\]", value)
        return len(matches) >= self._num_placeholders
299
+
300
+
301
class BulletListChecker(Instruction):
    """Checks the bullet list in the prompt."""

    def build_description(self, *, num_bullets=None):
        """Build the instruction description.

        Args:
          num_bullets: Exact number of markdown bullet points required to
            appear in the response. A random count in [1, _NUM_BULLETS] is
            drawn when None or negative.

        Returns:
          A string representing the instruction description.
        """
        if num_bullets is None or num_bullets < 0:
            num_bullets = random.randint(1, _NUM_BULLETS)
        self._num_bullets = num_bullets
        self._description_pattern = (
            "Your answer must contain exactly {num_bullets} bullet points. "
            "Use the markdown bullet points such as:\n"
            "* This is point 1. \n"
            "* This is point 2"
        )
        return self._description_pattern.format(num_bullets=self._num_bullets)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_bullets": self._num_bullets}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_bullets"]

    def check_following(self, value):
        r"""Check if the number of bullet lists meets the requirement.

        Both `*`-style and `-`-style markdown bullets are counted, each
        matched at the start of a line.

        Args:
          value: A string representing the response.

        Returns:
          True if the total bullet count equals the required number.
        """
        star_bullets = re.findall(r"^\s*\*[^\*].*$", value, flags=re.MULTILINE)
        dash_bullets = re.findall(r"^\s*-.*$", value, flags=re.MULTILINE)
        return len(star_bullets) + len(dash_bullets) == self._num_bullets
348
+
349
+
350
class ConstrainedResponseChecker(Instruction):
    """Checks the constrained response."""

    def build_description(self):
        """Build the instruction description."""
        # The canned answers the response must contain one of.
        self._constrained_responses = _CONSTRAINED_RESPONSE_OPTIONS
        self._description_pattern = (
            "Answer with one of the following options: {response_options}"
        )
        return self._description_pattern.format(
            response_options=self._constrained_responses
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response matches the constrained options.

        Args:
          value: A string representing the response.

        Returns:
          True if the stripped response contains one of the constrained
          options as a substring; otherwise False.
        """
        stripped = value.strip()
        return any(option in stripped for option in self._constrained_responses)
387
+
388
+
389
class ConstrainedStartChecker(Instruction):
    """Checks the response start."""

    def build_description(self, *, starter=None):
        """Build the instruction description.

        Args:
          starter: A string representing the keyword that the response should
            start with. Chosen at random from `_STARTER_OPTIONS` when None.

        Returns:
          A string representing the instruction description.
        """
        self._starter = starter.strip() if isinstance(starter, str) else starter
        if self._starter is None:
            self._starter = random.choice(_STARTER_OPTIONS)
        self._description_pattern = (
            "During the conversation, when it is your turn, "
            + "please always start with {starter}"
        )
        return self._description_pattern.format(starter=self._starter)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"starter": self._starter}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["starter"]

    def check_following(self, value):
        """Checks if the response starts with the constrained keyword or phrase.

        Args:
          value: A string representing the response.

        Returns:
          True if some line of the response starts with the given phrase or
          keyword contained in `instruction_args`; otherwise, False.
        """
        # Fix: escape the starter so regex metacharacters in it (".", "(",
        # "?", ...) are matched literally instead of corrupting the pattern
        # or raising re.error.
        response_pattern = r"^\s*" + re.escape(self._starter) + r".*$"
        match = re.search(response_pattern, value, flags=re.MULTILINE)
        return bool(match)
434
+
435
+
436
class HighlightSectionChecker(Instruction):
    """Checks the highlighted section."""

    def build_description(self, *, num_highlights=None):
        """Build the instruction description.

        Args:
          num_highlights: Minimum number of markdown-highlighted sections
            required. A random count in [1, _NUM_HIGHLIGHTED_SECTIONS] is
            drawn when None or negative.

        Returns:
          A string representing the instruction description.
        """
        if num_highlights is None or num_highlights < 0:
            num_highlights = random.randint(1, _NUM_HIGHLIGHTED_SECTIONS)
        self._num_highlights = num_highlights

        self._description_pattern = (
            "Highlight at least {num_highlights} sections in your answer with "
            "markdown, i.e. *highlighted section*."
        )

        return self._description_pattern.format(num_highlights=self._num_highlights)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_highlights": self._num_highlights}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_highlights"]

    def check_following(self, value):
        """Checks if the number of highlighted sections meets the requirement.

        Both *single-star* and **double-star** spans count, provided the text
        between the markers is non-empty after stripping.

        Args:
          value: a string representing the response.

        Returns:
          True if the number of non-empty highlighted sections is at least
          the required minimum; otherwise False.
        """
        single_spans = re.findall(r"\*[^\n\*]*\*", value)
        double_spans = re.findall(r"\*\*[^\n\*]*\*\*", value)
        total = sum(1 for span in single_spans if span.strip("*").strip())
        total += sum(
            1
            for span in double_spans
            if span.removeprefix("**").removesuffix("**").strip()
        )
        return total >= self._num_highlights
490
+
491
+
492
class SectionChecker(Instruction):
    """Checks that the response is divided into enough marked sections."""

    def build_description(self, *, section_spliter=None, num_sections=None):
        """Build the instruction description.

        Args:
          section_spliter: A string represents the section spliter keyword that
            marks a new section, i.e., `Section` or `SECTION`.
          num_sections: An integer specifying the number of sections.

        Returns:
          A string representing the instruction description.
        """
        self._section_spliter = (
            section_spliter.strip()
            if isinstance(section_spliter, str)
            else section_spliter
        )
        if self._section_spliter is None:
            self._section_spliter = random.choice(_SECTION_SPLITER)

        self._num_sections = num_sections
        if self._num_sections is None or self._num_sections < 0:
            self._num_sections = random.randint(1, _NUM_SECTIONS)

        self._description_pattern = (
            "Your response must have {num_sections} sections. Mark the beginning "
            + "of each section with {section_spliter} X, such as:\n"
            + "{section_spliter} 1\n"
            + "[content of section 1]\n"
            + "{section_spliter} 2\n"
            + "[content of section 2]"
        )

        return self._description_pattern.format(
            num_sections=self._num_sections, section_spliter=self._section_spliter
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "section_spliter": self._section_spliter,
            "num_sections": self._num_sections,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["section_spliter", "num_sections"]

    def check_following(self, value):
        """Checks the response contains multiple sections.

        Args:
          value: A string representing the response. The response is expected
            to contain multiple sections (number of sections is greater than 1).
            A new section starts with `Section 1`, where the number denotes the
            section index.

        Returns:
          True if the number of sections in the response is greater than or equal to
          the minimum number of sections; otherwise, False.
        """
        # Escape the spliter: it is arbitrary text, not a regex, and an
        # unescaped metacharacter (e.g. '.', '(') would crash or mismatch.
        section_splitter_patten = (
            r"\s?" + re.escape(self._section_spliter) + r"\s?\d+\s?"
        )
        sections = re.split(section_splitter_patten, value)
        num_sections = len(sections) - 1
        return num_sections >= self._num_sections
559
+
560
+
561
class ParagraphChecker(Instruction):
    """Checks that the response has the exact number of *** paragraphs."""

    def build_description(self, *, num_paragraphs=None):
        """Build the instruction description.

        Args:
          num_paragraphs: An integer specifying the number of paragraphs. Picked
            at random when None or negative.

        Returns:
          A string representing the instruction description.
        """
        if num_paragraphs is None or num_paragraphs < 0:
            self._num_paragraphs = random.randint(1, _NUM_PARAGRAPHS)
        else:
            self._num_paragraphs = num_paragraphs

        self._description_pattern = (
            "There should be {num_paragraphs} paragraphs. "
            + "Paragraphs are separated with the markdown divider: ***"
        )
        return self._description_pattern.format(num_paragraphs=self._num_paragraphs)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_paragraphs": self._num_paragraphs}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_paragraphs"]

    def check_following(self, value):
        """Checks the response contains required number of paragraphs.

        Args:
          value: A string representing the response. The response may contain
            paragraphs that are separated by the markdown divider: `***`.

        Returns:
          True if the actual number of paragraphs is the same as required;
          otherwise, False.
        """
        chunks = re.split(r"\s?\*\*\*\s?", value)
        count = len(chunks)
        last = len(chunks) - 1

        for idx, chunk in enumerate(chunks):
            if chunk.strip():
                continue
            # A leading/trailing empty chunk is tolerated (divider at the
            # edge); an empty chunk in the middle means a missing paragraph.
            if idx in (0, last):
                count -= 1
            else:
                return False

        return count == self._num_paragraphs
614
+
615
+
616
class PostscriptChecker(Instruction):
    """Checks that the response ends with a postscript section."""

    def build_description(self, *, postscript_marker=None):
        """Build the instruction description.

        Args:
          postscript_marker: A string containing the keyword that marks the start
            of the postscript section.

        Returns:
          A string representing the instruction description.
        """
        self._postscript_marker = (
            postscript_marker.strip()
            if isinstance(postscript_marker, str)
            else postscript_marker
        )
        if self._postscript_marker is None:
            self._postscript_marker = random.choice(_POSTSCRIPT_MARKER)

        self._description_pattern = (
            "At the end of your response, please explicitly add a postscript "
            + "starting with {postscript}"
        )

        return self._description_pattern.format(postscript=self._postscript_marker)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"postscript_marker": self._postscript_marker}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["postscript_marker"]

    def check_following(self, value):
        """Checks if the response follows the postscript format.

        Args:
          value: a string representing the response. The response is expected to
            contain a postscript section.

        Returns:
          True if the response contains a postscript section starting with
          the keyword containing in the `instruction_args`; otherwise False.
        """
        value = value.lower()
        # Known markers get lenient patterns that tolerate spacing variants.
        if self._postscript_marker == "P.P.S":
            postscript_pattern = r"\s*p\.\s?p\.\s?s.*$"
        elif self._postscript_marker == "P.S.":
            postscript_pattern = r"\s*p\.\s?s\..*$"
        else:
            # Escape custom markers: they are literal text, and unescaped
            # metacharacters ('.', '(', '+') would mismatch or raise re.error.
            postscript_pattern = (
                r"\s*" + re.escape(self._postscript_marker.lower()) + r".*$"
            )
        postscript = re.findall(postscript_pattern, value, flags=re.MULTILINE)
        return True if postscript else False
672
+
673
+
674
class RephraseChecker(Instruction):
    """Checks that a response rephrases only the *marked* spans of a message."""

    def build_description(self, *, original_message):
        """Build the instruction description.

        Args:
          original_message: A string representing the original message. The
            rephrased response should only change its words/sentences in between
            its two asterisks, for example, *change me*. Both original and rephrased
            messages should contain the changes in the form of *change me*.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `original_message` contains no *change me* span.
        """
        if not self.is_change(original_message):
            raise ValueError(
                f"Message {original_message} does not contain changes "
                "in the form of *change me*."
            )

        self._reference_without_change = original_message
        self._description = (
            "Rephrasing: Your rephrased response should only"
            + "change the words/sentences in between two asterisks"
            + "such as *change me*."
        )
        return self._description

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"original_message": self._reference_without_change}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["original_message"]

    def check_following(self, value):
        r"""Checks if the rephrasing follows the instruction.

        Args:
          value: A string representing the response, which is expected to rephras
            the string of `instruction_args`.

        Returns:
          True if `value` and `instruction_args` only differ by the words/sentences
          in between two asterisks such as *change me*; otherwise, False.

        Raises:
          ValueError: If `value` contains no *change me* span.
        """
        if not self.is_change(value):
            raise ValueError(
                f"value {value} does not contain changes in the form of *change me*."
            )

        # With the starred spans removed, the two texts must match exactly.
        stripped_response = self.strip_changes(value)
        stripped_reference = self.strip_changes(self._reference_without_change)
        return stripped_response == stripped_reference

    def is_change(self, response):
        """Check if there is change in the response in the form of *change me*."""
        return re.search(r"\*.*\*", response)

    def strip_changes(self, response):
        """Strips off the changes."""
        return re.sub(r"\*.*\*", "", response)
740
+
741
+
742
class KeywordChecker(Instruction):
    """Check the existence of certain keywords."""

    def build_description(self, *, keywords=None):
        """Build the instruction description.

        Args:
          keywords: A sequence of strings representing the keywords that are
            expected in the response. Generated randomly when falsy.

        Returns:
          A string representing the instruction description.
        """
        if not keywords:
            self._keywords = instructions_util.generate_keywords(
                num_keywords=_NUM_KEYWORDS
            )
        else:
            self._keywords = keywords
        # Sort for a deterministic description regardless of input order.
        self._keywords = sorted(self._keywords)

        self._description_pattern = "Include keywords {keywords} in the response."

        return self._description_pattern.format(keywords=self._keywords)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"keywords": self._keywords}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["keywords"]

    def check_following(self, value):
        """Check if the response contain the expected keywords."""
        # Escape each keyword: it is literal text, and unescaped regex
        # metacharacters (e.g. 'c++') would raise re.error or mismatch.
        for keyword in self._keywords:
            if not re.search(re.escape(keyword), value, flags=re.IGNORECASE):
                return False
        return True
782
+
783
+
784
class KeywordFrequencyChecker(Instruction):
    """Check the keyword frequency."""

    def build_description(self, *, keyword=None, frequency=None, relation=None):
        """Build the instruction description.

        Args:
          keyword: A string representing a keyword that is expected in the response.
          frequency: An integer specifying the number of times `keyword` is expected
            to appear in the response.
          relation: A string in (`less than`, `at least`), defining the relational
            operator for comparison.
            Two relational comparisons are supported for now:
            if 'less than', the actual number of occurrences < frequency;
            if 'at least', the actual number of occurrences >= frequency.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `relation` is given but not a supported relation.
        """
        if not keyword:
            self._keyword = instructions_util.generate_keywords(num_keywords=1)[0]
        else:
            self._keyword = keyword.strip()

        self._frequency = frequency
        if self._frequency is None or self._frequency < 0:
            self._frequency = random.randint(1, _KEYWORD_FREQUENCY)

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )
        else:
            self._comparison_relation = relation

        self._description_pattern = (
            "In your response, the word {keyword} should appear {relation} "
            + "{frequency} times."
        )

        return self._description_pattern.format(
            keyword=self._keyword,
            relation=self._comparison_relation,
            frequency=self._frequency,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "keyword": self._keyword,
            "frequency": self._frequency,
            "relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["keyword", "frequency", "relation"]

    def check_following(self, value):
        """Checks if the response contain the keyword with required frequency."""
        # Escape the keyword: it is literal text, and unescaped regex
        # metacharacters would raise re.error or skew the count.
        actual_occurrences = len(
            re.findall(re.escape(self._keyword), value, flags=re.IGNORECASE)
        )

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return actual_occurrences < self._frequency
        elif self._comparison_relation == _COMPARISON_RELATION[1]:
            return actual_occurrences >= self._frequency
853
+
854
+
855
class NumberOfWords(Instruction):
    """Checks that the response word count satisfies a relation."""

    def build_description(self, *, num_words=None, relation=None):
        """Build the instruction description.

        Args:
          num_words: An integer specifying the number of words contained in the
            response. Picked at random when None or negative.
          relation: A string in (`less than`, `at least`), defining the relational
            operator for comparison.
            Two relational comparisons are supported for now:
            if 'less than', the actual number of words < num_words;
            if 'at least', the actual number of words >= num_words.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `relation` is given but not a supported relation.
        """
        if num_words is None or num_words < 0:
            self._num_words = random.randint(
                _NUM_WORDS_LOWER_LIMIT, _NUM_WORDS_UPPER_LIMIT
            )
        else:
            self._num_words = num_words

        if relation is None:
            relation = random.choice(_COMPARISON_RELATION)
        elif relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )
        self._comparison_relation = relation

        self._description_pattern = "Answer with {relation} {num_words} words."
        return self._description_pattern.format(
            relation=self._comparison_relation, num_words=self._num_words
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_words": self._num_words, "relation": self._comparison_relation}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_words", "relation"]

    def check_following(self, value):
        """Checks if the response contains the expected number of words."""
        word_count = instructions_util.count_words(value)

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return word_count < self._num_words
        if self._comparison_relation == _COMPARISON_RELATION[1]:
            return word_count >= self._num_words
912
+
913
+
914
class JsonFormat(Instruction):
    """Checks that the whole response parses as JSON."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Entire output should be wrapped in JSON format. You can use markdown"
            " ticks such as ```."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Returns True if the response (minus markdown fences) is valid JSON."""
        candidate = value.strip()
        # Peel off an optional leading fence (with or without a json tag),
        # in the same order the fences are most likely to appear.
        for fence in ("```json", "```Json", "```JSON", "```"):
            candidate = candidate.removeprefix(fence)
        candidate = candidate.removesuffix("```").strip()

        try:
            json.loads(candidate)
        except ValueError:
            return False
        return True
947
+
948
+
949
class ParagraphFirstWordCheck(Instruction):
    """Check the paragraph count and the first word of the nth paragraph."""

    def build_description(
        self, num_paragraphs=None, nth_paragraph=None, first_word=None
    ):
        r"""Build the instruction description.

        Args:
          num_paragraphs: An integer indicating the number of paragraphs expected
            in the response. A paragraph is a subset of the string that is
            expected to be separated by '\n\n'.
          nth_paragraph: An integer indicating the paragraph number that we look at.
            Note that n starts from 1.
          first_word: A string that represent the first word of the bth paragraph.

        Returns:
          A string representing the instruction description.
        """
        self._num_paragraphs = num_paragraphs
        if self._num_paragraphs is None or self._num_paragraphs < 0:
            self._num_paragraphs = random.randint(1, _NUM_PARAGRAPHS)

        self._nth_paragraph = nth_paragraph
        if (
            self._nth_paragraph is None
            or self._nth_paragraph <= 0
            or self._nth_paragraph > self._num_paragraphs
        ):
            self._nth_paragraph = random.randint(1, self._num_paragraphs + 1)

        self._first_word = first_word
        if self._first_word is None:
            self._first_word = instructions_util.generate_keywords(num_keywords=1)[0]
        self._first_word = self._first_word.lower()

        self._description_pattern = (
            "There should be {num_paragraphs} paragraphs. "
            + "Paragraphs and only paragraphs are separated with each other by two "
            + "new lines as if it was '\\n\\n' in python. "
            + "Paragraph {nth_paragraph} must start with word {first_word}."
        )

        return self._description_pattern.format(
            num_paragraphs=self._num_paragraphs,
            nth_paragraph=self._nth_paragraph,
            first_word=self._first_word,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_paragraphs": self._num_paragraphs,
            "nth_paragraph": self._nth_paragraph,
            "first_word": self._first_word,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_paragraphs", "nth_paragraph", "first_word"]

    def check_following(self, value):
        """Checks for required number of paragraphs and correct first word.

        Args:
          value: a string representing the response. The response may contain
            paragraphs that are separated by two new lines and the first word of
            the nth paragraph will have to match a specified word.

        Returns:
          True if the number of paragraphs is the same as required and the first
          word of the specified paragraph is the same as required. Otherwise, false.
        """
        paragraphs = re.split(r"\n\n", value)
        # Count only non-blank chunks.
        paragraph_count = len(paragraphs) - sum(
            1 for paragraph in paragraphs if not paragraph.strip()
        )

        # NOTE: the nth index is taken against the raw split (blank chunks
        # included), while the count ignores blanks — kept as-is on purpose
        # to stay faithful to the reference checker's behavior.
        if self._nth_paragraph > paragraph_count:
            return False
        target = paragraphs[self._nth_paragraph - 1].strip()
        if not target:
            return False

        punctuation = {".", ",", "?", "!", "'", '"'}

        # First token, with leading quotes dropped and trailing punctuation cut.
        token = target.split()[0].strip().lstrip("'").lstrip('"')
        first_word = ""
        for char in token:
            if char in punctuation:
                break
            first_word += char.lower()

        return (
            paragraph_count == self._num_paragraphs
            and first_word == self._first_word
        )
1053
+
1054
+
1055
+ # TODO(jeffrey) add relation - at least/at most?
1056
class KeySentenceChecker(Instruction):
    """Check the existence of certain key sentences."""

    def build_description(self, key_sentences=None, num_sentences=None):
        """Build the instruction description.

        Args:
          key_sentences: A sequences of strings representing the key sentences that
            are expected in the response.
          num_sentences: The number of key sentences that are expected to be seen in
            the response.

        Returns:
          A string representing the instruction description.
        """
        if not key_sentences:
            # TODO(jeffrey) make a generate sentences function? wonderwords package
            self._key_sentences = set(["For now, this is fine."])
        else:
            self._key_sentences = key_sentences

        if not num_sentences:
            self._num_sentences = random.randint(1, len(self._key_sentences))
        else:
            self._num_sentences = num_sentences

        self._description_pattern = (
            "Include {num_sentences} of the following sentences {key_sentences}"
        )

        return self._description_pattern.format(
            num_sentences=self._num_sentences, key_sentences=self._key_sentences
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_sentences": self._num_sentences,
            "key_sentences": list(self._key_sentences),
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_sentences", "key_sentences"]

    def check_following(self, value):
        """Checks if the response contains the expected key sentences."""
        sentences = instructions_util.split_into_sentences(value)
        # Exact-match count of key sentences present in the response.
        found = sum(1 for sentence in self._key_sentences if sentence in sentences)
        return found == self._num_sentences
1111
+
1112
+
1113
class ForbiddenWords(Instruction):
    """Checks that specified words are not used in response."""

    def build_description(self, forbidden_words=None):
        """Build the instruction description.

        Args:
          forbidden_words: A sequences of strings representing words that are not
            allowed in the response. Generated randomly when falsy.

        Returns:
          A string representing the instruction description.
        """
        if not forbidden_words:
            self._forbidden_words = instructions_util.generate_keywords(
                num_keywords=_NUM_KEYWORDS
            )
        else:
            # Deduplicate before sorting for a deterministic description.
            self._forbidden_words = list(set(forbidden_words))
        self._forbidden_words = sorted(self._forbidden_words)
        self._description_pattern = (
            "Do not include keywords {forbidden_words} in the response."
        )

        return self._description_pattern.format(forbidden_words=self._forbidden_words)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"forbidden_words": self._forbidden_words}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["forbidden_words"]

    def check_following(self, value):
        """Check if the response does not contain the expected keywords."""
        # Escape each word: it is literal text, and unescaped regex
        # metacharacters would raise re.error or match the wrong thing.
        for word in self._forbidden_words:
            if re.search(
                r"\b" + re.escape(word) + r"\b", value, flags=re.IGNORECASE
            ):
                return False
        return True
1154
+
1155
+
1156
class RephraseParagraph(Instruction):
    """Checks that the paragraph is rephrased with bounded word overlap."""

    def build_description(self, *, original_paragraph, low, high):
        """Builds the instruction description.

        Args:
          original_paragraph: A string presenting the original paragraph. The
            rephrases response should have between low-high words in common.
          low: An integer presenting the lower bound of similar words.
          high: An integer representing the upper bound of similar words.

        Returns:
          A string representing the instruction description.
        """
        # TODO(jeffrey) make more encompassing
        self._original_paragraph = original_paragraph
        self._low = low
        self._high = high

        self._description = (
            "Rephrase the following paragraph: "
            + "{original_paragraph}\nYour response should have "
            + "between {low} and {high} of the same words. "
            + "Words are the same if and only if all of the "
            + "letters, ignoring cases, are the same. For "
            + "example, 'run' is the same as 'Run' but different "
            + "to 'ran'."
        )

        return self._description.format(
            original_paragraph=original_paragraph, low=self._low, high=self._high
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "original_paragraph": self._original_paragraph,
            "low": self._low,
            "high": self._high,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["original_paragraph", "low", "high"]

    def check_following(self, value):
        """Counts case-insensitive shared words and checks they fall in [low, high]."""
        response_counts = collections.Counter(re.findall(r"\w+", value.lower()))
        original_counts = collections.Counter(
            re.findall(r"\w+", self._original_paragraph.lower())
        )

        # Multiset intersection size: each word contributes its min count.
        overlap = sum(
            min(count, response_counts[word])
            for word, count in original_counts.items()
        )

        return self._low <= overlap <= self._high
1214
+
1215
+
1216
class TwoResponsesChecker(Instruction):
    """Check that two responses were given."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Give two different responses. Responses and only responses should"
            " be separated by 6 asterisk symbols: ******."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response has two different answers.

        Args:
          value: A string representing the response.

        Returns:
          True if two responses are detected and false otherwise.
        """
        segments = value.split("******")
        last = len(segments) - 1
        answers = []
        for position, segment in enumerate(segments):
            if segment.strip():
                answers.append(segment)
            elif position not in (0, last):
                # A blank segment between answers means a malformed split.
                return False
        return (
            len(answers) == 2
            and answers[0].strip() != answers[1].strip()
        )
1256
+
1257
+
1258
class RepeatPromptThenAnswer(Instruction):
    """Checks that Prompt is first repeated then answered."""

    def build_description(self, *, prompt_to_repeat=None):
        """Build the instruction description.

        Args:
          prompt_to_repeat: The prompt that is meant to be repeated.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `prompt_to_repeat` is not provided.
        """
        if not prompt_to_repeat:
            raise ValueError("prompt_to_repeat must be set.")
        self._prompt_to_repeat = prompt_to_repeat
        self._description_pattern = (
            "First repeat the request word for word without change,"
            " then give your answer (1. do not say any words or characters"
            " before repeating the request; 2. the request you need to repeat"
            " does not include this sentence)"
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"prompt_to_repeat": self._prompt_to_repeat}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["prompt_to_repeat"]

    def check_following(self, value):
        """Returns True if the response starts with the repeated prompt."""
        expected_prefix = self._prompt_to_repeat.strip().lower()
        return value.strip().lower().startswith(expected_prefix)
1293
+
1294
+
1295
class EndChecker(Instruction):
    """Checks that the prompt ends with a given phrase."""

    def build_description(self, *, end_phrase=None):
        """Build the instruction description.

        Args:
          end_phrase: A string representing the phrase the response should end with.

        Returns:
          A string representing the instruction description.
        """
        self._end_phrase = (
            end_phrase.strip() if isinstance(end_phrase, str) else end_phrase
        )
        if self._end_phrase is None:
            self._end_phrase = random.choice(_ENDING_OPTIONS)
        self._description_pattern = (
            "Finish your response with this exact phrase {ender}. "
            "No other words should follow this phrase."
        )
        return self._description_pattern.format(ender=self._end_phrase)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"end_phrase": self._end_phrase}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["end_phrase"]

    def check_following(self, value):
        """Checks if the response ends with the expected phrase."""
        value = value.strip().strip('"').lower()
        # Use a local so checking does not mutate the stored end phrase
        # (the previous version overwrote self._end_phrase here, which also
        # leaked the lowercased form through get_instruction_args()).
        end_phrase = self._end_phrase.strip().lower()
        return value.endswith(end_phrase)
1330
+
1331
+
1332
class TitleChecker(Instruction):
    """Checks the response for a <<double angular bracket>> title."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your answer must contain a title, wrapped in double angular brackets,"
            " such as <<poem of joy>>."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Returns True if any <<...>> span contains visible text."""
        return any(
            title.lstrip("<").rstrip(">").strip()
            for title in re.findall(r"<<[^\n]+>>", value)
        )
1360
+
1361
+
1362
class LetterFrequencyChecker(Instruction):
    """Checks letter frequency."""

    def build_description(self, *, letter=None, let_frequency=None, let_relation=None):
        """Build the instruction description.

        Args:
          letter: A single ASCII letter expected in the response; a random
            letter is chosen when missing or invalid.
          let_frequency: Number of required occurrences; randomized when
            None or negative.
          let_relation: One of ('less than', 'at least'); randomized when
            None.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `let_relation` is not a supported relation.
        """
        valid_letter = (
            bool(letter)
            and len(letter) == 1
            and letter.lower() in string.ascii_lowercase
        )
        if valid_letter:
            self._letter = letter.strip().lower()
        else:
            self._letter = random.choice(list(string.ascii_letters)).lower()

        if let_frequency is None or let_frequency < 0:
            self._frequency = random.randint(1, _LETTER_FREQUENCY)
        else:
            self._frequency = let_frequency

        if let_relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        else:
            if let_relation not in _COMPARISON_RELATION:
                raise ValueError(
                    "The supported relation for comparison must be in "
                    f"{_COMPARISON_RELATION}, but {let_relation} is given."
                )
            self._comparison_relation = let_relation

        self._description_pattern = (
            "In your response, the letter {letter} should appear {let_relation}"
            " {let_frequency} times."
        )

        return self._description_pattern.format(
            letter=self._letter,
            let_frequency=self._frequency,
            let_relation=self._comparison_relation,
        )

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return {
            "letter": self._letter,
            "let_frequency": self._frequency,
            "let_relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["letter", "let_frequency", "let_relation"]

    def check_following(self, value):
        """Checks that the response contains the letter at the right frequency."""
        # Single-character count is equivalent to Counter()[letter].
        occurrences = value.lower().count(self._letter)
        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return occurrences < self._frequency
        return occurrences >= self._frequency
1438
+
1439
+
1440
class CapitalLettersEnglishChecker(Instruction):
    """Checks that the response is in english and is in all capital letters."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your entire response should be in English, and in all capital letters."
        )
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Return True when the response is all-caps English text."""
        assert isinstance(value, str)

        if not value.isupper():
            return False
        try:
            return langdetect.detect(value) == "en"
        except langdetect.LangDetectException as e:
            # A detection failure counts as compliance.
            logging.error(
                "Unable to detect language for text %s due to %s", value, e
            )
            return True
1469
+
1470
+
1471
class LowercaseLettersEnglishChecker(Instruction):
    """Checks that the response is in english and is in all lowercase letters."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your entire response should be in English, and in all lowercase"
            " letters. No capital letters are allowed."
        )
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Return True when the response is all-lowercase English text."""
        assert isinstance(value, str)

        if not value.islower():
            return False
        try:
            return langdetect.detect(value) == "en"
        except langdetect.LangDetectException as e:
            # A detection failure counts as compliance.
            logging.error(
                "Unable to detect language for text %s due to %s", value, e
            )
            return True
1501
+
1502
+
1503
class CommaChecker(Instruction):
    """Checks the response for no commas."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "In your entire response, refrain from the use of any commas."
        )
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Return True when no comma appears anywhere in the response."""
        return "," not in value
1523
+
1524
+
1525
class CapitalWordFrequencyChecker(Instruction):
    """Checks frequency of words with all capital letters."""

    def build_description(
        self,
        capital_frequency=None,
        capital_relation=None,
    ):
        """Build the instruction description.

        Args:
          capital_frequency: An integer that represents the number of words that
            should be in all capital letters; randomized when None.
          capital_relation: A string that is 'at least' or 'at most' that refers
            to the frequency; randomized when None.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `capital_relation` is not a supported relation.
        """
        if capital_frequency is None:
            self._frequency = random.randint(1, _ALL_CAPITAL_WORD_FREQUENCY)
        else:
            self._frequency = capital_frequency

        if capital_relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        else:
            if capital_relation not in _COMPARISON_RELATION:
                raise ValueError(
                    "The supported relation for comparison must be in "
                    f"{_COMPARISON_RELATION}, but {capital_relation} is given."
                )
            self._comparison_relation = capital_relation

        self._description_pattern = (
            "In your response, words with all capital letters should appear"
            " {relation} {frequency} times."
        )

        return self._description_pattern.format(
            frequency=self._frequency, relation=self._comparison_relation
        )

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return {
            "capital_frequency": self._frequency,
            "capital_relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["capital_frequency", "capital_relation"]

    def check_following(self, value):
        """Checks the frequency of words with all capital letters."""
        # Tokenize with NLTK so hyphenated words count as a single word.
        tokens = instructions_util.nltk.word_tokenize(value)
        num_capital = sum(1 for token in tokens if token.isupper())

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return num_capital < self._frequency
        return num_capital >= self._frequency
1589
+
1590
+
1591
class QuotationChecker(Instruction):
    """Checks response is wrapped with double quotation marks."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Wrap your entire response with double quotation marks."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Return True when the stripped response starts and ends with a double quote."""
        stripped = value.strip()
        # len > 1 prevents a lone '"' from matching both ends.
        return (
            len(stripped) > 1
            and stripped.startswith('"')
            and stripped.endswith('"')
        )
1613
+
1614
+
1615
+ # ---------------------------------------------------------------------------
1616
+ # NEW IFTrain out-of-distribution constraints
1617
+ # ---------------------------------------------------------------------------
1618
+
1619
class KeywordOnceChecker(Instruction):
    """Check that a keyword appears at least once in the response."""

    def build_description(self, *, keyword=None):
        """Build the instruction description; picks a random keyword when unset."""
        if keyword:
            self._keyword = keyword.strip()
        else:
            self._keyword = instructions_util.generate_keywords(num_keywords=1)[0]
        self._description_pattern = (
            "Include keyword {keyword} in your response."
        )
        return self._description_pattern.format(keyword=self._keyword)

    def get_instruction_args(self):
        return {"keyword": self._keyword}

    def get_instruction_args_keys(self):
        return ["keyword"]

    def check_following(self, value):
        """Whole-word, case-insensitive search for the keyword."""
        pattern = re.compile(
            r"\b" + re.escape(self._keyword) + r"\b", re.IGNORECASE
        )
        return pattern.search(value) is not None
1640
+
1641
+
1642
class KeywordExactCountChecker(Instruction):
    """Check that a word appears exactly N times in the response."""

    def build_description(self, *, keyword=None, count=None):
        """Build the instruction description; randomizes missing arguments."""
        if keyword:
            self._keyword = keyword.strip()
        else:
            self._keyword = instructions_util.generate_keywords(num_keywords=1)[0]
        if count is None or count < 1:
            self._count = random.randint(1, 5)
        else:
            self._count = count
        self._description_pattern = (
            "In your response, the word {keyword} should appear {count} times."
        )
        return self._description_pattern.format(keyword=self._keyword, count=self._count)

    def get_instruction_args(self):
        return {"keyword": self._keyword, "count": self._count}

    def get_instruction_args_keys(self):
        return ["keyword", "count"]

    def check_following(self, value):
        """Count whole-word, case-insensitive matches and compare to the target."""
        pattern = re.compile(
            r"\b" + re.escape(self._keyword) + r"\b", re.IGNORECASE
        )
        return len(pattern.findall(value)) == self._count
1667
+
1668
+
1669
class ExcludeWordHarderChecker(Instruction):
    """Check that a specific keyword from the instruction is not used."""

    def build_description(self, *, keyword=None):
        """Build the instruction description; picks a random keyword when unset."""
        if keyword:
            self._keyword = keyword.strip()
        else:
            self._keyword = instructions_util.generate_keywords(num_keywords=1)[0]
        self._description_pattern = (
            "Do not include keyword {keyword} in the response."
        )
        return self._description_pattern.format(keyword=self._keyword)

    def get_instruction_args(self):
        return {"keyword": self._keyword}

    def get_instruction_args_keys(self):
        return ["keyword"]

    def check_following(self, value):
        """True only if the keyword never appears as a whole word (any case)."""
        pattern = re.compile(
            r"\b" + re.escape(self._keyword) + r"\b", re.IGNORECASE
        )
        return pattern.search(value) is None
1690
+
1691
+
1692
class LetterExactCountChecker(Instruction):
    """Check that a specific letter appears exactly N times in the response."""

    def build_description(self, *, letter=None, count=None):
        """Build the instruction description; randomizes invalid arguments."""
        valid_letter = (
            bool(letter)
            and len(letter) == 1
            and letter.lower() in string.ascii_lowercase
        )
        if valid_letter:
            self._letter = letter.strip().lower()
        else:
            self._letter = random.choice(list(string.ascii_lowercase))
        if count is None or count < 1:
            self._count = random.randint(1, _LETTER_FREQUENCY)
        else:
            self._count = count
        self._description_pattern = (
            "In your response, the letter {letter} should appear {count} times."
        )
        return self._description_pattern.format(letter=self._letter, count=self._count)

    def get_instruction_args(self):
        return {"letter": self._letter, "count": self._count}

    def get_instruction_args_keys(self):
        return ["letter", "count"]

    def check_following(self, value):
        """Case-insensitive character count must equal the target exactly."""
        return value.lower().count(self._letter) == self._count
1723
+
1724
+
1725
class ParagraphsTwoLineBreaksChecker(Instruction):
    """Check paragraphs separated by two line breaks ("\\n\\n")."""

    def build_description(self, *, num_paragraphs=None):
        """Build the instruction description; randomizes an invalid count."""
        if num_paragraphs is None or num_paragraphs < 1:
            self._num_paragraphs = random.randint(2, _NUM_PARAGRAPHS)
        else:
            self._num_paragraphs = num_paragraphs
        self._description_pattern = (
            "There should be {num_paragraphs} paragraphs. "
            "Paragraphs and only paragraphs are separated with each other by two line breaks."
        )
        return self._description_pattern.format(num_paragraphs=self._num_paragraphs)

    def get_instruction_args(self):
        return {"num_paragraphs": self._num_paragraphs}

    def get_instruction_args_keys(self):
        return ["num_paragraphs"]

    def check_following(self, value):
        """Count non-blank chunks between double line breaks."""
        chunks = value.split("\n\n")
        nonempty = sum(1 for chunk in chunks if chunk.strip())
        return nonempty == self._num_paragraphs
1748
+
1749
+
1750
class FirstWordSentenceChecker(Instruction):
    """Check that the first word of each sentence is a specific word."""

    def build_description(self, *, first_word=None):
        """Build the instruction description; picks a random word when unset."""
        if first_word:
            self._first_word = first_word.strip().lower()
        else:
            self._first_word = instructions_util.generate_keywords(num_keywords=1)[0]
        self._description_pattern = (
            "The first word of each sentence should be the word {first_word}."
        )
        return self._description_pattern.format(first_word=self._first_word)

    def get_instruction_args(self):
        return {"first_word": self._first_word}

    def get_instruction_args_keys(self):
        return ["first_word"]

    def check_following(self, value):
        """Every non-blank sentence must open with the target word."""
        sentences = instructions_util.split_into_sentences(value)
        if not sentences:
            return False
        expected = self._first_word.lower()
        for sentence in sentences:
            if not sentence.strip():
                # Blank fragments are ignored rather than failing the check.
                continue
            tokens = re.findall(r"\w+", sentence)
            if not tokens or tokens[0].lower() != expected:
                return False
        return True
1783
+
1784
+
1785
class FirstWordAnswerChecker(Instruction):
    """Check that the first word of the entire response is a specific word."""

    def build_description(self, *, first_word=None):
        """Build the instruction description; picks a random word when unset."""
        if first_word:
            self._first_word = first_word.strip().lower()
        else:
            self._first_word = instructions_util.generate_keywords(num_keywords=1)[0]
        self._description_pattern = (
            "The first word of your response should be the word {first_word}."
        )
        return self._description_pattern.format(first_word=self._first_word)

    def get_instruction_args(self):
        return {"first_word": self._first_word}

    def get_instruction_args_keys(self):
        return ["first_word"]

    def check_following(self, value):
        """Compare the first \\w+ token against the target (case-insensitive)."""
        tokens = re.findall(r"\w+", value.strip())
        return bool(tokens) and tokens[0].lower() == self._first_word.lower()
1810
+
1811
+
1812
class LastWordSentenceChecker(Instruction):
    """Check that the last word of each sentence (before punctuation) is a specific word."""

    def build_description(self, *, last_word=None):
        """Build the instruction description; picks a random word when unset."""
        if last_word:
            self._last_word = last_word.strip().lower()
        else:
            self._last_word = instructions_util.generate_keywords(num_keywords=1)[0]
        self._description_pattern = (
            "The last word of each sentence, before punctuation, should be the word {last_word}."
        )
        return self._description_pattern.format(last_word=self._last_word)

    def get_instruction_args(self):
        return {"last_word": self._last_word}

    def get_instruction_args_keys(self):
        return ["last_word"]

    def check_following(self, value):
        """Every non-blank sentence must close with the target word."""
        sentences = instructions_util.split_into_sentences(value)
        if not sentences:
            return False
        expected = self._last_word.lower()
        for sentence in sentences:
            if not sentence.strip():
                # Blank fragments are ignored rather than failing the check.
                continue
            tokens = re.findall(r"\w+", sentence)
            if not tokens or tokens[-1].lower() != expected:
                return False
        return True
1845
+
1846
+
1847
class LastWordAnswerChecker(Instruction):
    """Check that the last word of the entire response is a specific word."""

    def build_description(self, *, last_word=None):
        """Build the instruction description; picks a random word when unset."""
        if last_word:
            self._last_word = last_word.strip().lower()
        else:
            self._last_word = instructions_util.generate_keywords(num_keywords=1)[0]
        self._description_pattern = (
            "The last word of your response should be the word {last_word}."
        )
        return self._description_pattern.format(last_word=self._last_word)

    def get_instruction_args(self):
        return {"last_word": self._last_word}

    def get_instruction_args_keys(self):
        return ["last_word"]

    def check_following(self, value):
        """Compare the final \\w+ token (punctuation ignored) against the target."""
        tokens = re.findall(r"\w+", value.strip())
        return bool(tokens) and tokens[-1].lower() == self._last_word.lower()
1873
+
1874
+
1875
class BigramWrappingChecker(Instruction):
    """Check that every word bigram is wrapped in double angular brackets «...»."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Wrap every word bigram in double angular brackets, "
            "such as \u00abI am\u00bb \u00abat home\u00bb \u00abwith my\u00bb \u00abcute dog\u00bb."
        )
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """Require at least one «...» group, each containing exactly two words.

        NOTE(review): only wrapped groups are validated; unwrapped words
        elsewhere in the response are not detected.
        """
        groups = re.findall(r"\u00ab[^\u00ab\u00bb]+\u00bb", value)
        if not groups:
            return False
        return all(
            len(group.strip("\u00ab\u00bb").strip().split()) == 2
            for group in groups
        )
1904
+
1905
+
1906
class CopyingSimpleChecker(Instruction):
    """Check that the request is repeated without change and not answered."""

    def build_description(self, *, prompt_to_copy=None):
        """Build the instruction description.

        Raises:
          ValueError: If `prompt_to_copy` is empty or None.
        """
        if not prompt_to_copy:
            raise ValueError("prompt_to_copy must be set.")
        self._prompt_to_copy = prompt_to_copy
        self._description_pattern = (
            "Repeat the request without change (do not say anything before repeating "
            "the request; the request you need to repeat does not include this sentence) "
            "and do not answer the actual request!"
        )
        return self._description_pattern

    def get_instruction_args(self):
        return {"prompt_to_copy": self._prompt_to_copy}

    def get_instruction_args_keys(self):
        return ["prompt_to_copy"]

    def check_following(self, value):
        """Case-insensitive comparison of the stripped response and prompt."""
        return value.strip().lower() == self._prompt_to_copy.strip().lower()
1930
+
1931
+
1932
class CopyingMultipleChecker(Instruction):
    """Check that the request is repeated N times separated by 6 asterisks."""

    def build_description(self, *, prompt_to_copy=None, copy_count=None):
        """Build the instruction description.

        Raises:
          ValueError: If `prompt_to_copy` is empty or None.
        """
        if not prompt_to_copy:
            raise ValueError("prompt_to_copy must be set.")
        self._prompt_to_copy = prompt_to_copy
        if copy_count is None or copy_count < 1:
            self._copy_count = random.randint(2, 4)
        else:
            self._copy_count = copy_count
        self._description_pattern = (
            "Repeat the request without change {copy_count} times, separated by 6 asterisk symbols "
            "(do not say anything before repeating the request; the request you need to repeat "
            "does not include this sentence) and do not answer the actual request!"
        )
        return self._description_pattern.format(copy_count=self._copy_count)

    def get_instruction_args(self):
        return {"prompt_to_copy": self._prompt_to_copy, "copy_count": self._copy_count}

    def get_instruction_args_keys(self):
        return ["prompt_to_copy", "copy_count"]

    def check_following(self, value):
        """Split on the 6-asterisk divider and match every segment to the prompt."""
        segments = [seg.strip() for seg in value.split("******") if seg.strip()]
        if len(segments) != self._copy_count:
            return False
        expected = self._prompt_to_copy.strip().lower()
        return all(segment.lower() == expected for segment in segments)
1962
+
1963
+
1964
class NoDotChecker(Instruction):
    """Check that the response does not contain dots."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "In your entire response, refrain from the use of . (i.e. dots) as punctuation and in general."
        )
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """True only when no '.' character appears anywhere in the response."""
        return value.find(".") == -1
1981
+
1982
+
1983
class NoExclamationChecker(Instruction):
    """Check that the response does not contain exclamation marks."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "In your entire response, refrain from the use of ! (i.e. exclamation marks) as punctuation and in general."
        )
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """True only when no '!' character appears anywhere in the response."""
        return value.find("!") == -1
2000
+
2001
+
2002
class LowercaseWordCountChecker(Instruction):
    """Check that all lowercase words appear at most N times."""

    def build_description(self, *, max_count=None):
        """Build the instruction description; randomizes an invalid cap."""
        if max_count is None or max_count < 1:
            self._max_count = random.randint(1, 10)
        else:
            self._max_count = max_count
        self._description_pattern = (
            "In your response, all lowercase words should appear at most {max_count} times."
        )
        return self._description_pattern.format(max_count=self._max_count)

    def get_instruction_args(self):
        return {"max_count": self._max_count}

    def get_instruction_args_keys(self):
        return ["max_count"]

    def check_following(self, value):
        """No fully-lowercase word may occur more than `max_count` times."""
        counts = collections.Counter(re.findall(r"\b[a-z]+\b", value))
        # Vacuously true when no lowercase word is present.
        return not counts or max(counts.values()) <= self._max_count
2024
+
2025
+
2026
class LetterTotalCountChecker(Instruction):
    """Check that the response contains exactly N letters (or with relation)."""

    def build_description(self, *, num_letters=None, relation=None):
        """Build the instruction description.

        Args:
          num_letters: Target number of alphabetic characters; randomized
            when None or below 1.
          relation: 'less than' or 'at least'; randomized when None.

        Raises:
          ValueError: If `relation` is not a supported relation.
        """
        if num_letters is None or num_letters < 1:
            self._num_letters = random.randint(50, 500)
        else:
            self._num_letters = num_letters
        if relation is None:
            self._relation = random.choice(_COMPARISON_RELATION)
        else:
            if relation not in _COMPARISON_RELATION:
                raise ValueError(
                    f"The supported relation must be in {_COMPARISON_RELATION}, but {relation} is given."
                )
            self._relation = relation
        self._description_pattern = (
            "Answer with {relation} {num_letters} letters."
        )
        return self._description_pattern.format(relation=self._relation, num_letters=self._num_letters)

    def get_instruction_args(self):
        return {"num_letters": self._num_letters, "relation": self._relation}

    def get_instruction_args_keys(self):
        return ["num_letters", "relation"]

    def check_following(self, value):
        """Count alphabetic characters and compare per the stored relation."""
        total = sum(ch.isalpha() for ch in value)
        if self._relation == _COMPARISON_RELATION[0]:
            return total < self._num_letters
        return total >= self._num_letters
2058
+
2059
+
2060
class CountingCompositionChecker(Instruction):
    """Check 3 paragraphs with exactly N sentences each and M words per sentence."""

    def build_description(self, *, n_sent=None, n_words=None):
        """Build the instruction description; randomizes invalid counts."""
        if n_sent is None or n_sent < 1:
            self._n_sent = random.randint(2, 5)
        else:
            self._n_sent = n_sent
        if n_words is None or n_words < 1:
            self._n_words = random.randint(5, 15)
        else:
            self._n_words = n_words
        self._description_pattern = (
            "Write 3 paragraphs, delimited by the markdown divider: ***, "
            "with exactly {n_sent} sentences each, with exactly {n_words} words in each sentence."
        )
        return self._description_pattern.format(n_sent=self._n_sent, n_words=self._n_words)

    def get_instruction_args(self):
        return {"n_sent": self._n_sent, "n_words": self._n_words}

    def get_instruction_args_keys(self):
        return ["n_sent", "n_words"]

    def check_following(self, value):
        """Exactly 3 *** blocks, each with n_sent sentences of n_words words."""
        blocks = [b.strip() for b in re.split(r"\s?\*\*\*\s?", value) if b.strip()]
        if len(blocks) != 3:
            return False
        for block in blocks:
            sentences = [
                s for s in instructions_util.split_into_sentences(block) if s.strip()
            ]
            if len(sentences) != self._n_sent:
                return False
            if any(
                instructions_util.count_words(s) != self._n_words for s in sentences
            ):
                return False
        return True
2097
+
2098
+
2099
class UniqueWordsChecker(Instruction):
    """Check that only unique words are used (no word repeated)."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Only use unique words in your response, no word should be repeated!"
        )
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """Fail on the first case-insensitive repeated word."""
        seen = set()
        for token in re.findall(r"\b\w+\b", value.lower()):
            if token in seen:
                return False
            seen.add(token)
        return True
2117
+
2118
+
2119
class IncrementKeywordChecker(Instruction):
    """Check that keyword1 appears once and keyword2 appears twice."""

    def build_description(self, *, keyword1=None, keyword2=None):
        """Build the instruction description; fills missing keywords randomly."""
        if keyword1 and keyword2:
            self._keyword1 = keyword1.strip()
            self._keyword2 = keyword2.strip()
        else:
            fallback = instructions_util.generate_keywords(num_keywords=2)
            self._keyword1 = keyword1.strip() if keyword1 else fallback[0]
            self._keyword2 = keyword2.strip() if keyword2 else fallback[1]
        self._description_pattern = (
            "Include keyword {keyword1} once in your response, keyword {keyword2} twice in your response."
        )
        return self._description_pattern.format(keyword1=self._keyword1, keyword2=self._keyword2)

    def get_instruction_args(self):
        return {"keyword1": self._keyword1, "keyword2": self._keyword2}

    def get_instruction_args_keys(self):
        return ["keyword1", "keyword2"]

    def check_following(self, value):
        """keyword1 must occur exactly once and keyword2 exactly twice."""

        def _occurrences(word):
            # Whole-word, case-insensitive count.
            return len(
                re.findall(r"\b" + re.escape(word) + r"\b", value, flags=re.IGNORECASE)
            )

        return _occurrences(self._keyword1) == 1 and _occurrences(self._keyword2) == 2
2145
+
2146
+
2147
class PalindromeChecker(Instruction):
    """Check that the response includes a palindrome."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = "Include a palindrome in your response."
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """True if any word of length > 1 reads the same reversed."""
        return any(
            len(word) > 1 and word == word[::-1]
            for word in re.findall(r"\b\w+\b", value.lower())
        )
2166
+
2167
+
2168
class KeywordSpecificPositionChecker(Instruction):
    """Check that a keyword appears in the Nth sentence as the Mth word."""

    def build_description(self, *, keyword=None, sentence_num=None, word_num=None):
        """Build the instruction description; randomizes missing arguments."""
        if keyword:
            self._keyword = keyword.strip()
        else:
            self._keyword = instructions_util.generate_keywords(num_keywords=1)[0]
        if sentence_num is None or sentence_num < 1:
            self._sentence_num = random.randint(1, 5)
        else:
            self._sentence_num = sentence_num
        if word_num is None or word_num < 1:
            self._word_num = random.randint(1, 5)
        else:
            self._word_num = word_num
        self._description_pattern = (
            "Include keyword {keyword} in the {sentence_num}-th sentence, "
            "as the {word_num}-th word of that sentence."
        )
        return self._description_pattern.format(
            keyword=self._keyword,
            sentence_num=self._sentence_num,
            word_num=self._word_num,
        )

    def get_instruction_args(self):
        return {
            "keyword": self._keyword,
            "sentence_num": self._sentence_num,
            "word_num": self._word_num,
        }

    def get_instruction_args_keys(self):
        return ["keyword", "sentence_num", "word_num"]

    def check_following(self, value):
        """Locate the target sentence/word position and compare the keyword."""
        sentences = [
            s for s in instructions_util.split_into_sentences(value) if s.strip()
        ]
        if len(sentences) < self._sentence_num:
            return False
        tokens = re.findall(r"\w+", sentences[self._sentence_num - 1])
        if len(tokens) < self._word_num:
            return False
        return tokens[self._word_num - 1].lower() == self._keyword.lower()
2212
+
2213
+
2214
class StartEndSameWordChecker(Instruction):
    """Check that the response starts and ends with the same word."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Start and end your response with the same word "
            "(do not write anything after the last word, not even punctuation)."
        )
        return self._description_pattern

    def get_instruction_args(self):
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """First and last \\w+ tokens must match (case-insensitive).

        NOTE(review): responses with fewer than two words fail by design.
        """
        tokens = re.findall(r"\w+", value.strip())
        return len(tokens) >= 2 and tokens[0].lower() == tokens[-1].lower()
2236
+
2237
+
2238
class RepeatPhraseChecker(Instruction):
    """Check that a phrase is repeated N times with one word replaced each time."""

    def build_description(self, *, phrase=None, repeat_count=None):
        """Build the instruction description; randomizes missing arguments."""
        if phrase:
            self._phrase = phrase.strip()
        else:
            self._phrase = " ".join(instructions_util.generate_keywords(num_keywords=3))
        if repeat_count is None or repeat_count < 2:
            self._repeat_count = random.randint(2, 4)
        else:
            self._repeat_count = repeat_count
        self._description_pattern = (
            "Repeat the phrase \"{phrase}\" exactly {repeat_count} times, "
            "transforming it slightly each time by replacing one word."
        )
        return self._description_pattern.format(phrase=self._phrase, repeat_count=self._repeat_count)

    def get_instruction_args(self):
        return {"phrase": self._phrase, "repeat_count": self._repeat_count}

    def get_instruction_args_keys(self):
        return ["phrase", "repeat_count"]

    def check_following(self, value):
        """Count n-gram windows differing from the phrase by at most one word.

        A window with zero differences also counts, matching the original
        implementation's tolerance.
        """
        target = self._phrase.lower().split()
        width = len(target)
        if width == 0:
            return False
        tokens = re.findall(r"\w+", value.lower())
        matches = 0
        for start in range(len(tokens) - width + 1):
            window = tokens[start:start + width]
            mismatches = sum(a != b for a, b in zip(window, target))
            if mismatches <= 1:
                matches += 1
        return matches == self._repeat_count
2278
+
2279
+
2280
class NoAdjacentConsecutiveLettersChecker(Instruction):
    """Checks that no two adjacent words begin with consecutive alphabet letters."""

    def build_description(self):
        self._description_pattern = (
            "No two adjacent words can start with consecutive letters of the alphabet."
        )
        return self._description_pattern

    def get_instruction_args(self):
        # This instruction takes no arguments.
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """Return True iff no adjacent pair of word initials differs by exactly 1.

        Only words beginning with an ASCII letter take part in the check;
        the comparison is case-insensitive (both initials are lowered).
        """
        initials = [w[0].lower() for w in re.findall(r"\b[a-zA-Z]\w*\b", value)]
        return all(
            abs(ord(prev) - ord(curr)) != 1
            for prev, curr in zip(initials, initials[1:])
        )
2303
+
2304
+
2305
class SquareBracketsWordChecker(Instruction):
    """Checks that every word of the response sits inside square brackets."""

    def build_description(self):
        self._description_pattern = (
            "Enclose every word in your response within square brackets."
        )
        return self._description_pattern

    def get_instruction_args(self):
        # This instruction takes no arguments.
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """Return True iff no alphabetic word survives outside a [...] segment.

        Strategy: erase every non-nested bracketed segment, then verify the
        remainder contains no run of ASCII letters.
        """
        outside = re.sub(r"\[[^\[\]]*\]", "", value)
        return not re.search(r"\b[a-zA-Z]+\b", outside)
2329
+
2330
+
2331
class SentenceHyphensChecker(Instruction):
    """Checks that sentences are joined by hyphens with no spaces between them."""

    def build_description(self):
        self._description_pattern = (
            "All sentences must be connected using hyphens, with no spaces between them."
        )
        return self._description_pattern

    def get_instruction_args(self):
        # This instruction takes no arguments.
        return None

    def get_instruction_args_keys(self):
        return []

    def check_following(self, value):
        """Heuristic check for hyphen-connected sentences.

        Passes when the response (a) is non-empty, (b) contains at least one
        word-hyphen-word join, and (c) contains no conventional sentence break
        (terminator, whitespace, then a capital letter).
        """
        text = value.strip()
        if not text:
            return False
        if not re.search(r"\w-\w", text):
            return False
        return not re.search(r"[.!?]\s+[A-Z]", text)
2359
+
2360
+
2361
class CopyInstructionChecker(Instruction):
    """Checks that the response is a verbatim copy of a given instruction."""

    def build_description(self, *, instruction_to_copy=None):
        """Build the instruction description.

        Args:
          instruction_to_copy: the text the response must reproduce.

        Raises:
          ValueError: if `instruction_to_copy` is falsy.
        """
        if not instruction_to_copy:
            raise ValueError("instruction_to_copy must be set.")
        self._instruction_to_copy = instruction_to_copy
        self._description_pattern = (
            "Copy this instruction verbatim, do not follow the instruction, "
            "only copy it into the output (do not include this instruction sentence!)."
        )
        return self._description_pattern

    def get_instruction_args(self):
        return {"instruction_to_copy": self._instruction_to_copy}

    def get_instruction_args_keys(self):
        return ["instruction_to_copy"]

    def check_following(self, value):
        """Case-insensitive comparison after trimming surrounding whitespace."""
        expected = self._instruction_to_copy.strip().lower()
        return value.strip().lower() == expected
2384
+
2385
+
2386
class CopySpanIndexChecker(Instruction):
    """Checks that a character span of the source text appears in the response."""

    def build_description(self, *, source_text=None, n_start=None, n_end=None):
        """Build the instruction description.

        The span is inclusive of both endpoints, i.e. characters
        source_text[n_start .. n_end].

        Args:
          source_text: the text the span is taken from.
          n_start: start character index; defaults to 0 when None.
          n_end: end character index (inclusive); defaults to
            min(len(source_text), 50) when None.

        Raises:
          ValueError: if `source_text` is falsy.
        """
        if not source_text:
            raise ValueError("source_text must be set.")
        self._source_text = source_text
        self._n_start = 0 if n_start is None else n_start
        if n_end is None:
            self._n_end = min(len(source_text), 50)
        else:
            self._n_end = n_end
        # +1 because Python slicing excludes the end index, but n_end is inclusive.
        self._expected_span = source_text[self._n_start:self._n_end + 1]
        self._description_pattern = (
            "Copy the span of words that lies between (and including) index {n_start} and {n_end}, "
            "the indices are character indices!"
        )
        return self._description_pattern.format(n_start=self._n_start, n_end=self._n_end)

    def get_instruction_args(self):
        return {
            "source_text": self._source_text,
            "n_start": self._n_start,
            "n_end": self._n_end,
        }

    def get_instruction_args_keys(self):
        return ["source_text", "n_start", "n_end"]

    def check_following(self, value):
        """Return True iff the expected span (stripped) is a substring of the response."""
        return self._expected_span.strip() in value
synthgen/PtPersonaIFGen/lmeval_pt/instructions_registry.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The Google Research Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Registry of all instructions."""
16
+
17
+ from lmeval_pt import instructions
18
+
19
+
20
# Instruction-id category prefixes.  Every id in the registry below is formed
# by concatenating one of these prefixes with a short instruction name,
# e.g. _KEYWORD + "existence" -> "keywords:existence".
_KEYWORD = "keywords:"

_LANGUAGE = "language:"

_LENGTH = "length_constraints:"

_CONTENT = "detectable_content:"

_FORMAT = "detectable_format:"

_MULTITURN = "multi-turn:"

_COMBINATION = "combination:"

_STARTEND = "startend:"

_CHANGE_CASES = "change_case:"

_PUNCTUATION = "punctuation:"
40
# Maps instruction id -> checker class implementing it.  Commented-out entries
# are upstream TODOs that were never enabled.
INSTRUCTION_DICT = {
    _KEYWORD + "existence": instructions.KeywordChecker,
    _KEYWORD + "frequency": instructions.KeywordFrequencyChecker,
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": instructions.ForbiddenWords,
    _KEYWORD + "letter_frequency": instructions.LetterFrequencyChecker,
    _LANGUAGE + "response_language": instructions.ResponseLanguageChecker,
    _LENGTH + "number_sentences": instructions.NumberOfSentences,
    _LENGTH + "number_paragraphs": instructions.ParagraphChecker,
    _LENGTH + "number_words": instructions.NumberOfWords,
    _LENGTH + "nth_paragraph_first_word": instructions.ParagraphFirstWordCheck,
    _CONTENT + "number_placeholders": instructions.PlaceholderChecker,
    _CONTENT + "postscript": instructions.PostscriptChecker,
    _FORMAT + "number_bullet_lists": instructions.BulletListChecker,
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    _FORMAT + "constrained_response": instructions.ConstrainedResponseChecker,
    _FORMAT + "number_highlighted_sections": (instructions.HighlightSectionChecker),
    _FORMAT + "multiple_sections": instructions.SectionChecker,
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": instructions.JsonFormat,
    _FORMAT + "title": instructions.TitleChecker,
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": instructions.TwoResponsesChecker,
    _COMBINATION + "repeat_prompt": instructions.RepeatPromptThenAnswer,
    _STARTEND + "end_checker": instructions.EndChecker,
    _CHANGE_CASES + "capital_word_frequency": instructions.CapitalWordFrequencyChecker,
    _CHANGE_CASES + "english_capital": instructions.CapitalLettersEnglishChecker,
    _CHANGE_CASES + "english_lowercase": instructions.LowercaseLettersEnglishChecker,
    _PUNCTUATION + "no_comma": instructions.CommaChecker,
    _STARTEND + "quotation": instructions.QuotationChecker,
    # --- IFTrain out-of-distribution constraints ---
    _KEYWORD + "word_once": instructions.KeywordOnceChecker,
    _KEYWORD + "word_count_diff_numb": instructions.KeywordExactCountChecker,
    _KEYWORD + "exclude_word_harder": instructions.ExcludeWordHarderChecker,
    _KEYWORD + "letter_counting2": instructions.LetterExactCountChecker,
    _LENGTH + "paragraphs_two_line_breaks": instructions.ParagraphsTwoLineBreaksChecker,
    _STARTEND + "first_word_sent": instructions.FirstWordSentenceChecker,
    _STARTEND + "first_word_answer": instructions.FirstWordAnswerChecker,
    _STARTEND + "last_word_sent": instructions.LastWordSentenceChecker,
    _STARTEND + "last_word_answer": instructions.LastWordAnswerChecker,
    _FORMAT + "bigram_wrapping": instructions.BigramWrappingChecker,
    _COMBINATION + "copying_simple": instructions.CopyingSimpleChecker,
    _COMBINATION + "copying_multiple": instructions.CopyingMultipleChecker,
    _PUNCTUATION + "no_dot": instructions.NoDotChecker,
    _PUNCTUATION + "no_exclamation": instructions.NoExclamationChecker,
    _KEYWORD + "lowercase_counting": instructions.LowercaseWordCountChecker,
    _LENGTH + "letter_total_count": instructions.LetterTotalCountChecker,
    _LENGTH + "counting_composition": instructions.CountingCompositionChecker,
    _KEYWORD + "count_unique": instructions.UniqueWordsChecker,
    _KEYWORD + "count_increment_word": instructions.IncrementKeywordChecker,
    _KEYWORD + "palindrome": instructions.PalindromeChecker,
    _KEYWORD + "keyword_specific_pos": instructions.KeywordSpecificPositionChecker,
    _STARTEND + "start_end_same_word": instructions.StartEndSameWordChecker,
    _COMBINATION + "repeat_phrase": instructions.RepeatPhraseChecker,
    _KEYWORD + "no_adjacent_consecutive": instructions.NoAdjacentConsecutiveLettersChecker,
    _FORMAT + "square_brackets_word": instructions.SquareBracketsWordChecker,
    _FORMAT + "sentence_hyphens": instructions.SentenceHyphensChecker,
    _COMBINATION + "copy_instruction": instructions.CopyInstructionChecker,
    _COMBINATION + "copy_span_idx": instructions.CopySpanIndexChecker,
}
104
+
105
# Maps instruction id -> set of instruction ids it cannot be combined with.
# Every instruction conflicts at least with itself; conflict_make() (below)
# symmetrizes the relation so that A-conflicts-B implies B-conflicts-A.
INSTRUCTION_CONFLICTS = {
    _KEYWORD + "existence": {_KEYWORD + "existence"},
    _KEYWORD + "frequency": {_KEYWORD + "frequency"},
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": {_KEYWORD + "forbidden_words"},
    _KEYWORD + "letter_frequency": {_KEYWORD + "letter_frequency"},
    _LANGUAGE + "response_language": {
        _LANGUAGE + "response_language",
        _FORMAT + "multiple_sections",
        _KEYWORD + "existence",
        _KEYWORD + "frequency",
        _KEYWORD + "forbidden_words",
        _STARTEND + "end_checker",
        _CHANGE_CASES + "english_capital",
        _CHANGE_CASES + "english_lowercase",
    },
    _LENGTH + "number_sentences": {_LENGTH + "number_sentences"},
    _LENGTH + "number_paragraphs": {
        _LENGTH + "number_paragraphs",
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_sentences",
        # NOTE(review): duplicate of the entry two lines up; harmless in a
        # set literal but could be removed.
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "paragraphs_two_line_breaks",
        _LENGTH + "counting_composition",
    },
    _LENGTH + "number_words": {_LENGTH + "number_words"},
    _LENGTH + "nth_paragraph_first_word": {
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_paragraphs",
        _LENGTH + "paragraphs_two_line_breaks",
    },
    _CONTENT + "number_placeholders": {_CONTENT + "number_placeholders"},
    _CONTENT + "postscript": {_CONTENT + "postscript"},
    _FORMAT + "number_bullet_lists": {_FORMAT + "number_bullet_lists"},
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    # Constrained-response conflicts with every registered instruction.
    _FORMAT + "constrained_response": set(INSTRUCTION_DICT.keys()),
    _FORMAT + "number_highlighted_sections": {_FORMAT + "number_highlighted_sections"},
    _FORMAT + "multiple_sections": {
        _FORMAT + "multiple_sections",
        _LANGUAGE + "response_language",
        _FORMAT + "number_highlighted_sections",
    },
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "forbidden_words", _KEYWORD + "existence"}
    ),
    _FORMAT + "title": {_FORMAT + "title"},
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": set(INSTRUCTION_DICT.keys()).difference(
        {
            _KEYWORD + "forbidden_words",
            _KEYWORD + "existence",
            _LANGUAGE + "response_language",
            _FORMAT + "title",
            _PUNCTUATION + "no_comma",
        }
    ),
    _COMBINATION + "repeat_prompt": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "existence", _FORMAT + "title", _PUNCTUATION + "no_comma"}
    ),
    _STARTEND + "end_checker": {_STARTEND + "end_checker"},
    _CHANGE_CASES + "capital_word_frequency": {
        _CHANGE_CASES + "capital_word_frequency",
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _CHANGE_CASES + "english_capital": {_CHANGE_CASES + "english_capital"},
    _CHANGE_CASES + "english_lowercase": {
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _PUNCTUATION + "no_comma": {_PUNCTUATION + "no_comma"},
    _STARTEND + "quotation": {_STARTEND + "quotation", _FORMAT + "title"},
    # --- IFTrain out-of-distribution constraints ---
    _KEYWORD + "word_once": {_KEYWORD + "word_once"},
    _KEYWORD + "word_count_diff_numb": {_KEYWORD + "word_count_diff_numb"},
    _KEYWORD + "exclude_word_harder": {_KEYWORD + "exclude_word_harder"},
    _KEYWORD + "letter_counting2": {_KEYWORD + "letter_counting2", _KEYWORD + "letter_frequency"},
    _LENGTH + "paragraphs_two_line_breaks": {
        _LENGTH + "paragraphs_two_line_breaks",
        _LENGTH + "number_paragraphs",
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "counting_composition",
    },
    _STARTEND + "first_word_sent": {
        _STARTEND + "first_word_sent",
        _STARTEND + "first_word_answer",
    },
    _STARTEND + "first_word_answer": {
        _STARTEND + "first_word_answer",
        _STARTEND + "first_word_sent",
    },
    _STARTEND + "last_word_sent": {
        _STARTEND + "last_word_sent",
        _STARTEND + "last_word_answer",
        _STARTEND + "start_end_same_word",
    },
    _STARTEND + "last_word_answer": {
        _STARTEND + "last_word_answer",
        _STARTEND + "last_word_sent",
        _STARTEND + "start_end_same_word",
    },
    _FORMAT + "bigram_wrapping": {_FORMAT + "bigram_wrapping"},
    _COMBINATION + "copying_simple": set(INSTRUCTION_DICT.keys()).difference(
        {_COMBINATION + "copying_simple"}
    ),
    _COMBINATION + "copying_multiple": set(INSTRUCTION_DICT.keys()).difference(
        {_COMBINATION + "copying_multiple"}
    ),
    _PUNCTUATION + "no_dot": {
        _PUNCTUATION + "no_dot",
        _LENGTH + "number_sentences",
        _LENGTH + "counting_composition",
    },
    _PUNCTUATION + "no_exclamation": {_PUNCTUATION + "no_exclamation"},
    _KEYWORD + "lowercase_counting": {_KEYWORD + "lowercase_counting"},
    _LENGTH + "letter_total_count": {_LENGTH + "letter_total_count"},
    _LENGTH + "counting_composition": {
        _LENGTH + "counting_composition",
        _LENGTH + "number_paragraphs",
        _LENGTH + "paragraphs_two_line_breaks",
        _LENGTH + "number_sentences",
        _PUNCTUATION + "no_dot",
    },
    _KEYWORD + "count_unique": {_KEYWORD + "count_unique"},
    _KEYWORD + "count_increment_word": {_KEYWORD + "count_increment_word"},
    _KEYWORD + "palindrome": {_KEYWORD + "palindrome"},
    _KEYWORD + "keyword_specific_pos": {_KEYWORD + "keyword_specific_pos"},
    _STARTEND + "start_end_same_word": {
        _STARTEND + "start_end_same_word",
        _STARTEND + "last_word_sent",
        _STARTEND + "last_word_answer",
    },
    _COMBINATION + "repeat_phrase": {_COMBINATION + "repeat_phrase"},
    _KEYWORD + "no_adjacent_consecutive": {_KEYWORD + "no_adjacent_consecutive"},
    _FORMAT + "square_brackets_word": {
        _FORMAT + "square_brackets_word",
        _CONTENT + "number_placeholders",
    },
    _FORMAT + "sentence_hyphens": {_FORMAT + "sentence_hyphens"},
    _COMBINATION + "copy_instruction": set(INSTRUCTION_DICT.keys()).difference(
        {_COMBINATION + "copy_instruction"}
    ),
    _COMBINATION + "copy_span_idx": set(INSTRUCTION_DICT.keys()).difference(
        {_COMBINATION + "copy_span_idx"}
    ),
}
256
+
257
+
258
def conflict_make(conflicts):
    """Makes sure if A conflicts with B, B will conflict with A.

    Mutates `conflicts` in place and also returns it.

    Args:
      conflicts: Dictionary of potential conflicts where key is instruction id
        and value is set of instruction ids that it conflicts with.

    Returns:
      Revised version of the dictionary. All instructions conflict with
      themselves. If A conflicts with B, B will conflict with A.
    """
    for inst_id, peers in conflicts.items():
        # Iterate over a snapshot: the peer sets are mutated while we walk.
        for peer in list(peers):
            conflicts[peer].add(inst_id)
        # Reflexive closure: every instruction conflicts with itself.
        peers.add(inst_id)
    return conflicts
synthgen/PtPersonaIFGen/lmeval_pt/instructions_util.py ADDED
@@ -0,0 +1,1701 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The Google Research Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Utility library of instructions."""
16
+
17
+ import functools
18
+ import os
19
+ import random
20
+ import re
21
+ from importlib.metadata import version
22
+
23
+ import immutabledict
24
+ import nltk
25
+ from packaging.version import parse as parse_version
26
+
27
+
28
+ # Downloading 'punkt' with nltk<3.9 has a remote code vuln.
29
+ # see https://github.com/EleutherAI/lm-evaluation-harness/issues/2210
30
+ # and https://github.com/nltk/nltk/issues/3266
31
+ # for more information.
32
# Minimum safe nltk version (see the vulnerability links above).
NLTK_MIN_VERSION = "3.9.1"
# Process rank read from LOCAL_RANK; defaults to "0" for single-process runs.
RANK = os.environ.get("LOCAL_RANK", "0")


def download_nltk_resources():
    """Download 'punkt_tab' if not already installed.

    Only the process with rank "0" performs the download, so concurrent
    processes do not write the same data simultaneously.
    NOTE(review): non-zero ranks return immediately without waiting for the
    download to finish -- presumably a later sync point covers this; confirm.

    Raises:
        RuntimeError: if the installed `nltk` is older than NLTK_MIN_VERSION
            (older versions are vulnerable to remote code execution when
            downloading tokenizer data).
    """
    nltk_version = parse_version(version("nltk"))
    # Explicit raise instead of `assert`: assertions are stripped under
    # `python -O`, which would silently disable this security check.
    if nltk_version < parse_version(NLTK_MIN_VERSION):
        raise RuntimeError(
            f"`nltk` version {nltk_version} is not >= {NLTK_MIN_VERSION}. Please update `nltk` before proceeding--older versions are vulnerable to a remote code execution vulnerability."
        )

    try:
        nltk.data.find("tokenizers/punkt_tab")
    except LookupError:
        if RANK == "0":
            nltk.download("punkt_tab")
            print("Downloaded punkt_tab on rank 0")


download_nltk_resources()
53
+
54
+ WORD_LIST = [
55
+ "western",
56
+ "sentence",
57
+ "signal",
58
+ "dump",
59
+ "spot",
60
+ "opposite",
61
+ "bottom",
62
+ "potato",
63
+ "administration",
64
+ "working",
65
+ "welcome",
66
+ "morning",
67
+ "good",
68
+ "agency",
69
+ "primary",
70
+ "wish",
71
+ "responsibility",
72
+ "press",
73
+ "problem",
74
+ "president",
75
+ "steal",
76
+ "brush",
77
+ "read",
78
+ "type",
79
+ "beat",
80
+ "trainer",
81
+ "growth",
82
+ "lock",
83
+ "bone",
84
+ "case",
85
+ "equal",
86
+ "comfortable",
87
+ "region",
88
+ "replacement",
89
+ "performance",
90
+ "mate",
91
+ "walk",
92
+ "medicine",
93
+ "film",
94
+ "thing",
95
+ "rock",
96
+ "tap",
97
+ "total",
98
+ "competition",
99
+ "ease",
100
+ "south",
101
+ "establishment",
102
+ "gather",
103
+ "parking",
104
+ "world",
105
+ "plenty",
106
+ "breath",
107
+ "claim",
108
+ "alcohol",
109
+ "trade",
110
+ "dear",
111
+ "highlight",
112
+ "street",
113
+ "matter",
114
+ "decision",
115
+ "mess",
116
+ "agreement",
117
+ "studio",
118
+ "coach",
119
+ "assist",
120
+ "brain",
121
+ "wing",
122
+ "style",
123
+ "private",
124
+ "top",
125
+ "brown",
126
+ "leg",
127
+ "buy",
128
+ "procedure",
129
+ "method",
130
+ "speed",
131
+ "high",
132
+ "company",
133
+ "valuable",
134
+ "pie",
135
+ "analyst",
136
+ "session",
137
+ "pattern",
138
+ "district",
139
+ "pleasure",
140
+ "dinner",
141
+ "swimming",
142
+ "joke",
143
+ "order",
144
+ "plate",
145
+ "department",
146
+ "motor",
147
+ "cell",
148
+ "spend",
149
+ "cabinet",
150
+ "difference",
151
+ "power",
152
+ "examination",
153
+ "engine",
154
+ "horse",
155
+ "dimension",
156
+ "pay",
157
+ "toe",
158
+ "curve",
159
+ "literature",
160
+ "bother",
161
+ "fire",
162
+ "possibility",
163
+ "debate",
164
+ "activity",
165
+ "passage",
166
+ "hello",
167
+ "cycle",
168
+ "background",
169
+ "quiet",
170
+ "author",
171
+ "effect",
172
+ "actor",
173
+ "page",
174
+ "bicycle",
175
+ "error",
176
+ "throat",
177
+ "attack",
178
+ "character",
179
+ "phone",
180
+ "tea",
181
+ "increase",
182
+ "outcome",
183
+ "file",
184
+ "specific",
185
+ "inspector",
186
+ "internal",
187
+ "potential",
188
+ "staff",
189
+ "building",
190
+ "employer",
191
+ "shoe",
192
+ "hand",
193
+ "direction",
194
+ "garden",
195
+ "purchase",
196
+ "interview",
197
+ "study",
198
+ "recognition",
199
+ "member",
200
+ "spiritual",
201
+ "oven",
202
+ "sandwich",
203
+ "weird",
204
+ "passenger",
205
+ "particular",
206
+ "response",
207
+ "reaction",
208
+ "size",
209
+ "variation",
210
+ "a",
211
+ "cancel",
212
+ "candy",
213
+ "exit",
214
+ "guest",
215
+ "condition",
216
+ "fly",
217
+ "price",
218
+ "weakness",
219
+ "convert",
220
+ "hotel",
221
+ "great",
222
+ "mouth",
223
+ "mind",
224
+ "song",
225
+ "sugar",
226
+ "suspect",
227
+ "telephone",
228
+ "ear",
229
+ "roof",
230
+ "paint",
231
+ "refrigerator",
232
+ "organization",
233
+ "jury",
234
+ "reward",
235
+ "engineering",
236
+ "day",
237
+ "possession",
238
+ "crew",
239
+ "bar",
240
+ "road",
241
+ "description",
242
+ "celebration",
243
+ "score",
244
+ "mark",
245
+ "letter",
246
+ "shower",
247
+ "suggestion",
248
+ "sir",
249
+ "luck",
250
+ "national",
251
+ "progress",
252
+ "hall",
253
+ "stroke",
254
+ "theory",
255
+ "offer",
256
+ "story",
257
+ "tax",
258
+ "definition",
259
+ "history",
260
+ "ride",
261
+ "medium",
262
+ "opening",
263
+ "glass",
264
+ "elevator",
265
+ "stomach",
266
+ "question",
267
+ "ability",
268
+ "leading",
269
+ "village",
270
+ "computer",
271
+ "city",
272
+ "grand",
273
+ "confidence",
274
+ "candle",
275
+ "priest",
276
+ "recommendation",
277
+ "point",
278
+ "necessary",
279
+ "body",
280
+ "desk",
281
+ "secret",
282
+ "horror",
283
+ "noise",
284
+ "culture",
285
+ "warning",
286
+ "water",
287
+ "round",
288
+ "diet",
289
+ "flower",
290
+ "bus",
291
+ "tough",
292
+ "permission",
293
+ "week",
294
+ "prompt",
295
+ "connection",
296
+ "abuse",
297
+ "height",
298
+ "save",
299
+ "corner",
300
+ "border",
301
+ "stress",
302
+ "drive",
303
+ "stop",
304
+ "rip",
305
+ "meal",
306
+ "listen",
307
+ "confusion",
308
+ "girlfriend",
309
+ "living",
310
+ "relation",
311
+ "significance",
312
+ "plan",
313
+ "creative",
314
+ "atmosphere",
315
+ "blame",
316
+ "invite",
317
+ "housing",
318
+ "paper",
319
+ "drink",
320
+ "roll",
321
+ "silver",
322
+ "drunk",
323
+ "age",
324
+ "damage",
325
+ "smoke",
326
+ "environment",
327
+ "pack",
328
+ "savings",
329
+ "influence",
330
+ "tourist",
331
+ "rain",
332
+ "post",
333
+ "sign",
334
+ "grandmother",
335
+ "run",
336
+ "profit",
337
+ "push",
338
+ "clerk",
339
+ "final",
340
+ "wine",
341
+ "swim",
342
+ "pause",
343
+ "stuff",
344
+ "singer",
345
+ "funeral",
346
+ "average",
347
+ "source",
348
+ "scene",
349
+ "tradition",
350
+ "personal",
351
+ "snow",
352
+ "nobody",
353
+ "distance",
354
+ "sort",
355
+ "sensitive",
356
+ "animal",
357
+ "major",
358
+ "negotiation",
359
+ "click",
360
+ "mood",
361
+ "period",
362
+ "arrival",
363
+ "expression",
364
+ "holiday",
365
+ "repeat",
366
+ "dust",
367
+ "closet",
368
+ "gold",
369
+ "bad",
370
+ "sail",
371
+ "combination",
372
+ "clothes",
373
+ "emphasis",
374
+ "duty",
375
+ "black",
376
+ "step",
377
+ "school",
378
+ "jump",
379
+ "document",
380
+ "professional",
381
+ "lip",
382
+ "chemical",
383
+ "front",
384
+ "wake",
385
+ "while",
386
+ "inside",
387
+ "watch",
388
+ "row",
389
+ "subject",
390
+ "penalty",
391
+ "balance",
392
+ "possible",
393
+ "adult",
394
+ "aside",
395
+ "sample",
396
+ "appeal",
397
+ "wedding",
398
+ "depth",
399
+ "king",
400
+ "award",
401
+ "wife",
402
+ "blow",
403
+ "site",
404
+ "camp",
405
+ "music",
406
+ "safe",
407
+ "gift",
408
+ "fault",
409
+ "guess",
410
+ "act",
411
+ "shame",
412
+ "drama",
413
+ "capital",
414
+ "exam",
415
+ "stupid",
416
+ "record",
417
+ "sound",
418
+ "swing",
419
+ "novel",
420
+ "minimum",
421
+ "ratio",
422
+ "machine",
423
+ "shape",
424
+ "lead",
425
+ "operation",
426
+ "salary",
427
+ "cloud",
428
+ "affair",
429
+ "hit",
430
+ "chapter",
431
+ "stage",
432
+ "quantity",
433
+ "access",
434
+ "army",
435
+ "chain",
436
+ "traffic",
437
+ "kick",
438
+ "analysis",
439
+ "airport",
440
+ "time",
441
+ "vacation",
442
+ "philosophy",
443
+ "ball",
444
+ "chest",
445
+ "thanks",
446
+ "place",
447
+ "mountain",
448
+ "advertising",
449
+ "red",
450
+ "past",
451
+ "rent",
452
+ "return",
453
+ "tour",
454
+ "house",
455
+ "construction",
456
+ "net",
457
+ "native",
458
+ "war",
459
+ "figure",
460
+ "fee",
461
+ "spray",
462
+ "user",
463
+ "dirt",
464
+ "shot",
465
+ "task",
466
+ "stick",
467
+ "friend",
468
+ "software",
469
+ "promotion",
470
+ "interaction",
471
+ "surround",
472
+ "block",
473
+ "purpose",
474
+ "practice",
475
+ "conflict",
476
+ "routine",
477
+ "requirement",
478
+ "bonus",
479
+ "hole",
480
+ "state",
481
+ "junior",
482
+ "sweet",
483
+ "catch",
484
+ "tear",
485
+ "fold",
486
+ "wall",
487
+ "editor",
488
+ "life",
489
+ "position",
490
+ "pound",
491
+ "respect",
492
+ "bathroom",
493
+ "coat",
494
+ "script",
495
+ "job",
496
+ "teach",
497
+ "birth",
498
+ "view",
499
+ "resolve",
500
+ "theme",
501
+ "employee",
502
+ "doubt",
503
+ "market",
504
+ "education",
505
+ "serve",
506
+ "recover",
507
+ "tone",
508
+ "harm",
509
+ "miss",
510
+ "union",
511
+ "understanding",
512
+ "cow",
513
+ "river",
514
+ "association",
515
+ "concept",
516
+ "training",
517
+ "recipe",
518
+ "relationship",
519
+ "reserve",
520
+ "depression",
521
+ "proof",
522
+ "hair",
523
+ "revenue",
524
+ "independent",
525
+ "lift",
526
+ "assignment",
527
+ "temporary",
528
+ "amount",
529
+ "loss",
530
+ "edge",
531
+ "track",
532
+ "check",
533
+ "rope",
534
+ "estimate",
535
+ "pollution",
536
+ "stable",
537
+ "message",
538
+ "delivery",
539
+ "perspective",
540
+ "mirror",
541
+ "assistant",
542
+ "representative",
543
+ "witness",
544
+ "nature",
545
+ "judge",
546
+ "fruit",
547
+ "tip",
548
+ "devil",
549
+ "town",
550
+ "emergency",
551
+ "upper",
552
+ "drop",
553
+ "stay",
554
+ "human",
555
+ "neck",
556
+ "speaker",
557
+ "network",
558
+ "sing",
559
+ "resist",
560
+ "league",
561
+ "trip",
562
+ "signature",
563
+ "lawyer",
564
+ "importance",
565
+ "gas",
566
+ "choice",
567
+ "engineer",
568
+ "success",
569
+ "part",
570
+ "external",
571
+ "worker",
572
+ "simple",
573
+ "quarter",
574
+ "student",
575
+ "heart",
576
+ "pass",
577
+ "spite",
578
+ "shift",
579
+ "rough",
580
+ "lady",
581
+ "grass",
582
+ "community",
583
+ "garage",
584
+ "youth",
585
+ "standard",
586
+ "skirt",
587
+ "promise",
588
+ "blind",
589
+ "television",
590
+ "disease",
591
+ "commission",
592
+ "positive",
593
+ "energy",
594
+ "calm",
595
+ "presence",
596
+ "tune",
597
+ "basis",
598
+ "preference",
599
+ "head",
600
+ "common",
601
+ "cut",
602
+ "somewhere",
603
+ "presentation",
604
+ "current",
605
+ "thought",
606
+ "revolution",
607
+ "effort",
608
+ "master",
609
+ "implement",
610
+ "republic",
611
+ "floor",
612
+ "principle",
613
+ "stranger",
614
+ "shoulder",
615
+ "grade",
616
+ "button",
617
+ "tennis",
618
+ "police",
619
+ "collection",
620
+ "account",
621
+ "register",
622
+ "glove",
623
+ "divide",
624
+ "professor",
625
+ "chair",
626
+ "priority",
627
+ "combine",
628
+ "peace",
629
+ "extension",
630
+ "maybe",
631
+ "evening",
632
+ "frame",
633
+ "sister",
634
+ "wave",
635
+ "code",
636
+ "application",
637
+ "mouse",
638
+ "match",
639
+ "counter",
640
+ "bottle",
641
+ "half",
642
+ "cheek",
643
+ "resolution",
644
+ "back",
645
+ "knowledge",
646
+ "make",
647
+ "discussion",
648
+ "screw",
649
+ "length",
650
+ "accident",
651
+ "battle",
652
+ "dress",
653
+ "knee",
654
+ "log",
655
+ "package",
656
+ "it",
657
+ "turn",
658
+ "hearing",
659
+ "newspaper",
660
+ "layer",
661
+ "wealth",
662
+ "profile",
663
+ "imagination",
664
+ "answer",
665
+ "weekend",
666
+ "teacher",
667
+ "appearance",
668
+ "meet",
669
+ "bike",
670
+ "rise",
671
+ "belt",
672
+ "crash",
673
+ "bowl",
674
+ "equivalent",
675
+ "support",
676
+ "image",
677
+ "poem",
678
+ "risk",
679
+ "excitement",
680
+ "remote",
681
+ "secretary",
682
+ "public",
683
+ "produce",
684
+ "plane",
685
+ "display",
686
+ "money",
687
+ "sand",
688
+ "situation",
689
+ "punch",
690
+ "customer",
691
+ "title",
692
+ "shake",
693
+ "mortgage",
694
+ "option",
695
+ "number",
696
+ "pop",
697
+ "window",
698
+ "extent",
699
+ "nothing",
700
+ "experience",
701
+ "opinion",
702
+ "departure",
703
+ "dance",
704
+ "indication",
705
+ "boy",
706
+ "material",
707
+ "band",
708
+ "leader",
709
+ "sun",
710
+ "beautiful",
711
+ "muscle",
712
+ "farmer",
713
+ "variety",
714
+ "fat",
715
+ "handle",
716
+ "director",
717
+ "opportunity",
718
+ "calendar",
719
+ "outside",
720
+ "pace",
721
+ "bath",
722
+ "fish",
723
+ "consequence",
724
+ "put",
725
+ "owner",
726
+ "go",
727
+ "doctor",
728
+ "information",
729
+ "share",
730
+ "hurt",
731
+ "protection",
732
+ "career",
733
+ "finance",
734
+ "force",
735
+ "golf",
736
+ "garbage",
737
+ "aspect",
738
+ "kid",
739
+ "food",
740
+ "boot",
741
+ "milk",
742
+ "respond",
743
+ "objective",
744
+ "reality",
745
+ "raw",
746
+ "ring",
747
+ "mall",
748
+ "one",
749
+ "impact",
750
+ "area",
751
+ "news",
752
+ "international",
753
+ "series",
754
+ "impress",
755
+ "mother",
756
+ "shelter",
757
+ "strike",
758
+ "loan",
759
+ "month",
760
+ "seat",
761
+ "anything",
762
+ "entertainment",
763
+ "familiar",
764
+ "clue",
765
+ "year",
766
+ "glad",
767
+ "supermarket",
768
+ "natural",
769
+ "god",
770
+ "cost",
771
+ "conversation",
772
+ "tie",
773
+ "ruin",
774
+ "comfort",
775
+ "earth",
776
+ "storm",
777
+ "percentage",
778
+ "assistance",
779
+ "budget",
780
+ "strength",
781
+ "beginning",
782
+ "sleep",
783
+ "other",
784
+ "young",
785
+ "unit",
786
+ "fill",
787
+ "store",
788
+ "desire",
789
+ "hide",
790
+ "value",
791
+ "cup",
792
+ "maintenance",
793
+ "nurse",
794
+ "function",
795
+ "tower",
796
+ "role",
797
+ "class",
798
+ "camera",
799
+ "database",
800
+ "panic",
801
+ "nation",
802
+ "basket",
803
+ "ice",
804
+ "art",
805
+ "spirit",
806
+ "chart",
807
+ "exchange",
808
+ "feedback",
809
+ "statement",
810
+ "reputation",
811
+ "search",
812
+ "hunt",
813
+ "exercise",
814
+ "nasty",
815
+ "notice",
816
+ "male",
817
+ "yard",
818
+ "annual",
819
+ "collar",
820
+ "date",
821
+ "platform",
822
+ "plant",
823
+ "fortune",
824
+ "passion",
825
+ "friendship",
826
+ "spread",
827
+ "cancer",
828
+ "ticket",
829
+ "attitude",
830
+ "island",
831
+ "active",
832
+ "object",
833
+ "service",
834
+ "buyer",
835
+ "bite",
836
+ "card",
837
+ "face",
838
+ "steak",
839
+ "proposal",
840
+ "patient",
841
+ "heat",
842
+ "rule",
843
+ "resident",
844
+ "broad",
845
+ "politics",
846
+ "west",
847
+ "knife",
848
+ "expert",
849
+ "girl",
850
+ "design",
851
+ "salt",
852
+ "baseball",
853
+ "grab",
854
+ "inspection",
855
+ "cousin",
856
+ "couple",
857
+ "magazine",
858
+ "cook",
859
+ "dependent",
860
+ "security",
861
+ "chicken",
862
+ "version",
863
+ "currency",
864
+ "ladder",
865
+ "scheme",
866
+ "kitchen",
867
+ "employment",
868
+ "local",
869
+ "attention",
870
+ "manager",
871
+ "fact",
872
+ "cover",
873
+ "sad",
874
+ "guard",
875
+ "relative",
876
+ "county",
877
+ "rate",
878
+ "lunch",
879
+ "program",
880
+ "initiative",
881
+ "gear",
882
+ "bridge",
883
+ "breast",
884
+ "talk",
885
+ "dish",
886
+ "guarantee",
887
+ "beer",
888
+ "vehicle",
889
+ "reception",
890
+ "woman",
891
+ "substance",
892
+ "copy",
893
+ "lecture",
894
+ "advantage",
895
+ "park",
896
+ "cold",
897
+ "death",
898
+ "mix",
899
+ "hold",
900
+ "scale",
901
+ "tomorrow",
902
+ "blood",
903
+ "request",
904
+ "green",
905
+ "cookie",
906
+ "church",
907
+ "strip",
908
+ "forever",
909
+ "beyond",
910
+ "debt",
911
+ "tackle",
912
+ "wash",
913
+ "following",
914
+ "feel",
915
+ "maximum",
916
+ "sector",
917
+ "sea",
918
+ "property",
919
+ "economics",
920
+ "menu",
921
+ "bench",
922
+ "try",
923
+ "language",
924
+ "start",
925
+ "call",
926
+ "solid",
927
+ "address",
928
+ "income",
929
+ "foot",
930
+ "senior",
931
+ "honey",
932
+ "few",
933
+ "mixture",
934
+ "cash",
935
+ "grocery",
936
+ "link",
937
+ "map",
938
+ "form",
939
+ "factor",
940
+ "pot",
941
+ "model",
942
+ "writer",
943
+ "farm",
944
+ "winter",
945
+ "skill",
946
+ "anywhere",
947
+ "birthday",
948
+ "policy",
949
+ "release",
950
+ "husband",
951
+ "lab",
952
+ "hurry",
953
+ "mail",
954
+ "equipment",
955
+ "sink",
956
+ "pair",
957
+ "driver",
958
+ "consideration",
959
+ "leather",
960
+ "skin",
961
+ "blue",
962
+ "boat",
963
+ "sale",
964
+ "brick",
965
+ "two",
966
+ "feed",
967
+ "square",
968
+ "dot",
969
+ "rush",
970
+ "dream",
971
+ "location",
972
+ "afternoon",
973
+ "manufacturer",
974
+ "control",
975
+ "occasion",
976
+ "trouble",
977
+ "introduction",
978
+ "advice",
979
+ "bet",
980
+ "eat",
981
+ "kill",
982
+ "category",
983
+ "manner",
984
+ "office",
985
+ "estate",
986
+ "pride",
987
+ "awareness",
988
+ "slip",
989
+ "crack",
990
+ "client",
991
+ "nail",
992
+ "shoot",
993
+ "membership",
994
+ "soft",
995
+ "anybody",
996
+ "web",
997
+ "official",
998
+ "individual",
999
+ "pizza",
1000
+ "interest",
1001
+ "bag",
1002
+ "spell",
1003
+ "profession",
1004
+ "queen",
1005
+ "deal",
1006
+ "resource",
1007
+ "ship",
1008
+ "guy",
1009
+ "chocolate",
1010
+ "joint",
1011
+ "formal",
1012
+ "upstairs",
1013
+ "car",
1014
+ "resort",
1015
+ "abroad",
1016
+ "dealer",
1017
+ "associate",
1018
+ "finger",
1019
+ "surgery",
1020
+ "comment",
1021
+ "team",
1022
+ "detail",
1023
+ "crazy",
1024
+ "path",
1025
+ "tale",
1026
+ "initial",
1027
+ "arm",
1028
+ "radio",
1029
+ "demand",
1030
+ "single",
1031
+ "draw",
1032
+ "yellow",
1033
+ "contest",
1034
+ "piece",
1035
+ "quote",
1036
+ "pull",
1037
+ "commercial",
1038
+ "shirt",
1039
+ "contribution",
1040
+ "cream",
1041
+ "channel",
1042
+ "suit",
1043
+ "discipline",
1044
+ "instruction",
1045
+ "concert",
1046
+ "speech",
1047
+ "low",
1048
+ "effective",
1049
+ "hang",
1050
+ "scratch",
1051
+ "industry",
1052
+ "breakfast",
1053
+ "lay",
1054
+ "join",
1055
+ "metal",
1056
+ "bedroom",
1057
+ "minute",
1058
+ "product",
1059
+ "rest",
1060
+ "temperature",
1061
+ "many",
1062
+ "give",
1063
+ "argument",
1064
+ "print",
1065
+ "purple",
1066
+ "laugh",
1067
+ "health",
1068
+ "credit",
1069
+ "investment",
1070
+ "sell",
1071
+ "setting",
1072
+ "lesson",
1073
+ "egg",
1074
+ "middle",
1075
+ "marriage",
1076
+ "level",
1077
+ "evidence",
1078
+ "phrase",
1079
+ "love",
1080
+ "self",
1081
+ "benefit",
1082
+ "guidance",
1083
+ "affect",
1084
+ "you",
1085
+ "dad",
1086
+ "anxiety",
1087
+ "special",
1088
+ "boyfriend",
1089
+ "test",
1090
+ "blank",
1091
+ "payment",
1092
+ "soup",
1093
+ "obligation",
1094
+ "reply",
1095
+ "smile",
1096
+ "deep",
1097
+ "complaint",
1098
+ "addition",
1099
+ "review",
1100
+ "box",
1101
+ "towel",
1102
+ "minor",
1103
+ "fun",
1104
+ "soil",
1105
+ "issue",
1106
+ "cigarette",
1107
+ "internet",
1108
+ "gain",
1109
+ "tell",
1110
+ "entry",
1111
+ "spare",
1112
+ "incident",
1113
+ "family",
1114
+ "refuse",
1115
+ "branch",
1116
+ "can",
1117
+ "pen",
1118
+ "grandfather",
1119
+ "constant",
1120
+ "tank",
1121
+ "uncle",
1122
+ "climate",
1123
+ "ground",
1124
+ "volume",
1125
+ "communication",
1126
+ "kind",
1127
+ "poet",
1128
+ "child",
1129
+ "screen",
1130
+ "mine",
1131
+ "quit",
1132
+ "gene",
1133
+ "lack",
1134
+ "charity",
1135
+ "memory",
1136
+ "tooth",
1137
+ "fear",
1138
+ "mention",
1139
+ "marketing",
1140
+ "reveal",
1141
+ "reason",
1142
+ "court",
1143
+ "season",
1144
+ "freedom",
1145
+ "land",
1146
+ "sport",
1147
+ "audience",
1148
+ "classroom",
1149
+ "law",
1150
+ "hook",
1151
+ "win",
1152
+ "carry",
1153
+ "eye",
1154
+ "smell",
1155
+ "distribution",
1156
+ "research",
1157
+ "country",
1158
+ "dare",
1159
+ "hope",
1160
+ "whereas",
1161
+ "stretch",
1162
+ "library",
1163
+ "if",
1164
+ "delay",
1165
+ "college",
1166
+ "plastic",
1167
+ "book",
1168
+ "present",
1169
+ "use",
1170
+ "worry",
1171
+ "champion",
1172
+ "goal",
1173
+ "economy",
1174
+ "march",
1175
+ "election",
1176
+ "reflection",
1177
+ "midnight",
1178
+ "slide",
1179
+ "inflation",
1180
+ "action",
1181
+ "challenge",
1182
+ "guitar",
1183
+ "coast",
1184
+ "apple",
1185
+ "campaign",
1186
+ "field",
1187
+ "jacket",
1188
+ "sense",
1189
+ "way",
1190
+ "visual",
1191
+ "remove",
1192
+ "weather",
1193
+ "trash",
1194
+ "cable",
1195
+ "regret",
1196
+ "buddy",
1197
+ "beach",
1198
+ "historian",
1199
+ "courage",
1200
+ "sympathy",
1201
+ "truck",
1202
+ "tension",
1203
+ "permit",
1204
+ "nose",
1205
+ "bed",
1206
+ "son",
1207
+ "person",
1208
+ "base",
1209
+ "meat",
1210
+ "usual",
1211
+ "air",
1212
+ "meeting",
1213
+ "worth",
1214
+ "game",
1215
+ "independence",
1216
+ "physical",
1217
+ "brief",
1218
+ "play",
1219
+ "raise",
1220
+ "board",
1221
+ "she",
1222
+ "key",
1223
+ "writing",
1224
+ "pick",
1225
+ "command",
1226
+ "party",
1227
+ "yesterday",
1228
+ "spring",
1229
+ "candidate",
1230
+ "physics",
1231
+ "university",
1232
+ "concern",
1233
+ "development",
1234
+ "change",
1235
+ "string",
1236
+ "target",
1237
+ "instance",
1238
+ "room",
1239
+ "bitter",
1240
+ "bird",
1241
+ "football",
1242
+ "normal",
1243
+ "split",
1244
+ "impression",
1245
+ "wood",
1246
+ "long",
1247
+ "meaning",
1248
+ "stock",
1249
+ "cap",
1250
+ "leadership",
1251
+ "media",
1252
+ "ambition",
1253
+ "fishing",
1254
+ "essay",
1255
+ "salad",
1256
+ "repair",
1257
+ "today",
1258
+ "designer",
1259
+ "night",
1260
+ "bank",
1261
+ "drawing",
1262
+ "inevitable",
1263
+ "phase",
1264
+ "vast",
1265
+ "chip",
1266
+ "anger",
1267
+ "switch",
1268
+ "cry",
1269
+ "twist",
1270
+ "personality",
1271
+ "attempt",
1272
+ "storage",
1273
+ "being",
1274
+ "preparation",
1275
+ "bat",
1276
+ "selection",
1277
+ "white",
1278
+ "technology",
1279
+ "contract",
1280
+ "side",
1281
+ "section",
1282
+ "station",
1283
+ "till",
1284
+ "structure",
1285
+ "tongue",
1286
+ "taste",
1287
+ "truth",
1288
+ "difficulty",
1289
+ "group",
1290
+ "limit",
1291
+ "main",
1292
+ "move",
1293
+ "feeling",
1294
+ "light",
1295
+ "example",
1296
+ "mission",
1297
+ "might",
1298
+ "wait",
1299
+ "wheel",
1300
+ "shop",
1301
+ "host",
1302
+ "classic",
1303
+ "alternative",
1304
+ "cause",
1305
+ "agent",
1306
+ "consist",
1307
+ "table",
1308
+ "airline",
1309
+ "text",
1310
+ "pool",
1311
+ "craft",
1312
+ "range",
1313
+ "fuel",
1314
+ "tool",
1315
+ "partner",
1316
+ "load",
1317
+ "entrance",
1318
+ "deposit",
1319
+ "hate",
1320
+ "article",
1321
+ "video",
1322
+ "summer",
1323
+ "feature",
1324
+ "extreme",
1325
+ "mobile",
1326
+ "hospital",
1327
+ "flight",
1328
+ "fall",
1329
+ "pension",
1330
+ "piano",
1331
+ "fail",
1332
+ "result",
1333
+ "rub",
1334
+ "gap",
1335
+ "system",
1336
+ "report",
1337
+ "suck",
1338
+ "ordinary",
1339
+ "wind",
1340
+ "nerve",
1341
+ "ask",
1342
+ "shine",
1343
+ "note",
1344
+ "line",
1345
+ "mom",
1346
+ "perception",
1347
+ "brother",
1348
+ "reference",
1349
+ "bend",
1350
+ "charge",
1351
+ "treat",
1352
+ "trick",
1353
+ "term",
1354
+ "homework",
1355
+ "bake",
1356
+ "bid",
1357
+ "status",
1358
+ "project",
1359
+ "strategy",
1360
+ "orange",
1361
+ "let",
1362
+ "enthusiasm",
1363
+ "parent",
1364
+ "concentrate",
1365
+ "device",
1366
+ "travel",
1367
+ "poetry",
1368
+ "business",
1369
+ "society",
1370
+ "kiss",
1371
+ "end",
1372
+ "vegetable",
1373
+ "employ",
1374
+ "schedule",
1375
+ "hour",
1376
+ "brave",
1377
+ "focus",
1378
+ "process",
1379
+ "movie",
1380
+ "illegal",
1381
+ "general",
1382
+ "coffee",
1383
+ "ad",
1384
+ "highway",
1385
+ "chemistry",
1386
+ "psychology",
1387
+ "hire",
1388
+ "bell",
1389
+ "conference",
1390
+ "relief",
1391
+ "show",
1392
+ "neat",
1393
+ "funny",
1394
+ "weight",
1395
+ "quality",
1396
+ "club",
1397
+ "daughter",
1398
+ "zone",
1399
+ "touch",
1400
+ "tonight",
1401
+ "shock",
1402
+ "burn",
1403
+ "excuse",
1404
+ "name",
1405
+ "survey",
1406
+ "landscape",
1407
+ "advance",
1408
+ "satisfaction",
1409
+ "bread",
1410
+ "disaster",
1411
+ "item",
1412
+ "hat",
1413
+ "prior",
1414
+ "shopping",
1415
+ "visit",
1416
+ "east",
1417
+ "photo",
1418
+ "home",
1419
+ "idea",
1420
+ "father",
1421
+ "comparison",
1422
+ "cat",
1423
+ "pipe",
1424
+ "winner",
1425
+ "count",
1426
+ "lake",
1427
+ "fight",
1428
+ "prize",
1429
+ "foundation",
1430
+ "dog",
1431
+ "keep",
1432
+ "ideal",
1433
+ "fan",
1434
+ "struggle",
1435
+ "peak",
1436
+ "safety",
1437
+ "solution",
1438
+ "hell",
1439
+ "conclusion",
1440
+ "population",
1441
+ "strain",
1442
+ "alarm",
1443
+ "measurement",
1444
+ "second",
1445
+ "train",
1446
+ "race",
1447
+ "due",
1448
+ "insurance",
1449
+ "boss",
1450
+ "tree",
1451
+ "monitor",
1452
+ "sick",
1453
+ "course",
1454
+ "drag",
1455
+ "appointment",
1456
+ "slice",
1457
+ "still",
1458
+ "care",
1459
+ "patience",
1460
+ "rich",
1461
+ "escape",
1462
+ "emotion",
1463
+ "royal",
1464
+ "female",
1465
+ "childhood",
1466
+ "government",
1467
+ "picture",
1468
+ "will",
1469
+ "sock",
1470
+ "big",
1471
+ "gate",
1472
+ "oil",
1473
+ "cross",
1474
+ "pin",
1475
+ "improvement",
1476
+ "championship",
1477
+ "silly",
1478
+ "help",
1479
+ "sky",
1480
+ "pitch",
1481
+ "man",
1482
+ "diamond",
1483
+ "most",
1484
+ "transition",
1485
+ "work",
1486
+ "science",
1487
+ "committee",
1488
+ "moment",
1489
+ "fix",
1490
+ "teaching",
1491
+ "dig",
1492
+ "specialist",
1493
+ "complex",
1494
+ "guide",
1495
+ "people",
1496
+ "dead",
1497
+ "voice",
1498
+ "original",
1499
+ "break",
1500
+ "topic",
1501
+ "data",
1502
+ "degree",
1503
+ "reading",
1504
+ "recording",
1505
+ "bunch",
1506
+ "reach",
1507
+ "judgment",
1508
+ "lie",
1509
+ "regular",
1510
+ "set",
1511
+ "painting",
1512
+ "mode",
1513
+ "list",
1514
+ "player",
1515
+ "bear",
1516
+ "north",
1517
+ "wonder",
1518
+ "carpet",
1519
+ "heavy",
1520
+ "officer",
1521
+ "negative",
1522
+ "clock",
1523
+ "unique",
1524
+ "baby",
1525
+ "pain",
1526
+ "assumption",
1527
+ "disk",
1528
+ "iron",
1529
+ "bill",
1530
+ "drawer",
1531
+ "look",
1532
+ "double",
1533
+ "mistake",
1534
+ "finish",
1535
+ "future",
1536
+ "brilliant",
1537
+ "contact",
1538
+ "math",
1539
+ "rice",
1540
+ "leave",
1541
+ "restaurant",
1542
+ "discount",
1543
+ "sex",
1544
+ "virus",
1545
+ "bit",
1546
+ "trust",
1547
+ "event",
1548
+ "wear",
1549
+ "juice",
1550
+ "failure",
1551
+ "bug",
1552
+ "context",
1553
+ "mud",
1554
+ "whole",
1555
+ "wrap",
1556
+ "intention",
1557
+ "draft",
1558
+ "pressure",
1559
+ "cake",
1560
+ "dark",
1561
+ "explanation",
1562
+ "space",
1563
+ "angle",
1564
+ "word",
1565
+ "efficiency",
1566
+ "management",
1567
+ "habit",
1568
+ "star",
1569
+ "chance",
1570
+ "finding",
1571
+ "transportation",
1572
+ "stand",
1573
+ "criticism",
1574
+ "flow",
1575
+ "door",
1576
+ "injury",
1577
+ "insect",
1578
+ "surprise",
1579
+ "apartment",
1580
+ ] # pylint: disable=line-too-long
1581
+
1582
+ # ISO 639-1 codes to language names.
1583
+ LANGUAGE_CODES = immutabledict.immutabledict(
1584
+ {
1585
+ "en": "English",
1586
+ "es": "Spanish",
1587
+ "pt": "Portuguese",
1588
+ "ar": "Arabic",
1589
+ "hi": "Hindi",
1590
+ "fr": "French",
1591
+ "ru": "Russian",
1592
+ "de": "German",
1593
+ "ja": "Japanese",
1594
+ "it": "Italian",
1595
+ "bn": "Bengali",
1596
+ "uk": "Ukrainian",
1597
+ "th": "Thai",
1598
+ "ur": "Urdu",
1599
+ "ta": "Tamil",
1600
+ "te": "Telugu",
1601
+ "bg": "Bulgarian",
1602
+ "ko": "Korean",
1603
+ "pl": "Polish",
1604
+ "he": "Hebrew",
1605
+ "fa": "Persian",
1606
+ "vi": "Vietnamese",
1607
+ "ne": "Nepali",
1608
+ "sw": "Swahili",
1609
+ "kn": "Kannada",
1610
+ "mr": "Marathi",
1611
+ "gu": "Gujarati",
1612
+ "pa": "Punjabi",
1613
+ "ml": "Malayalam",
1614
+ "fi": "Finnish",
1615
+ }
1616
+ )
1617
+
1618
+ _ALPHABETS = "([A-Za-z])"
1619
+ _PREFIXES = "(Mr|St|Mrs|Ms|Dr)[.]"
1620
+ _SUFFIXES = "(Inc|Ltd|Jr|Sr|Co)"
1621
+ _STARTERS = r"(Mr|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
1622
+ _ACRONYMS = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
1623
+ _WEBSITES = "[.](com|net|org|io|gov|edu|me)"
1624
+ _DIGITS = "([0-9])"
1625
+ _MULTIPLE_DOTS = r"\.{2,}"
1626
+
1627
+
1628
+ def split_into_sentences(text):
1629
+ """Split the text into sentences.
1630
+
1631
+ Args:
1632
+ text: A string that consists of more than or equal to one sentences.
1633
+
1634
+ Returns:
1635
+ A list of strings where each string is a sentence.
1636
+ """
1637
+ text = " " + text + " "
1638
+ text = text.replace("\n", " ")
1639
+ text = re.sub(_PREFIXES, "\\1<prd>", text)
1640
+ text = re.sub(_WEBSITES, "<prd>\\1", text)
1641
+ text = re.sub(_DIGITS + "[.]" + _DIGITS, "\\1<prd>\\2", text)
1642
+ text = re.sub(
1643
+ _MULTIPLE_DOTS,
1644
+ lambda match: "<prd>" * len(match.group(0)) + "<stop>",
1645
+ text,
1646
+ )
1647
+ if "Ph.D" in text:
1648
+ text = text.replace("Ph.D.", "Ph<prd>D<prd>")
1649
+ text = re.sub(r"\s" + _ALPHABETS + "[.] ", " \\1<prd> ", text)
1650
+ text = re.sub(_ACRONYMS + " " + _STARTERS, "\\1<stop> \\2", text)
1651
+ text = re.sub(
1652
+ _ALPHABETS + "[.]" + _ALPHABETS + "[.]" + _ALPHABETS + "[.]",
1653
+ "\\1<prd>\\2<prd>\\3<prd>",
1654
+ text,
1655
+ )
1656
+ text = re.sub(_ALPHABETS + "[.]" + _ALPHABETS + "[.]", "\\1<prd>\\2<prd>", text)
1657
+ text = re.sub(" " + _SUFFIXES + "[.] " + _STARTERS, " \\1<stop> \\2", text)
1658
+ text = re.sub(" " + _SUFFIXES + "[.]", " \\1<prd>", text)
1659
+ text = re.sub(" " + _ALPHABETS + "[.]", " \\1<prd>", text)
1660
+ if "”" in text:
1661
+ text = text.replace(".”", "”.")
1662
+ if '"' in text:
1663
+ text = text.replace('."', '".')
1664
+ if "!" in text:
1665
+ text = text.replace('!"', '"!')
1666
+ if "?" in text:
1667
+ text = text.replace('?"', '"?')
1668
+ text = text.replace(".", ".<stop>")
1669
+ text = text.replace("?", "?<stop>")
1670
+ text = text.replace("!", "!<stop>")
1671
+ text = text.replace("<prd>", ".")
1672
+ sentences = text.split("<stop>")
1673
+ sentences = [s.strip() for s in sentences]
1674
+ if sentences and not sentences[-1]:
1675
+ sentences = sentences[:-1]
1676
+ return sentences
1677
+
1678
+
1679
+ def count_words(text):
1680
+ """Counts the number of words."""
1681
+ tokenizer = nltk.tokenize.RegexpTokenizer(r"\w+")
1682
+ tokens = tokenizer.tokenize(text)
1683
+ num_words = len(tokens)
1684
+ return num_words
1685
+
1686
+
1687
+ @functools.lru_cache(maxsize=None)
1688
+ def _get_sentence_tokenizer():
1689
+ return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
1690
+
1691
+
1692
+ def count_sentences(text):
1693
+ """Count the number of sentences."""
1694
+ tokenizer = _get_sentence_tokenizer()
1695
+ tokenized_sentences = tokenizer.tokenize(text)
1696
+ return len(tokenized_sentences)
1697
+
1698
+
1699
+ def generate_keywords(num_keywords):
1700
+ """Randomly generates a few keywords."""
1701
+ return random.sample(WORD_LIST, k=num_keywords)
synthgen/PtPersonaIFGen/lmeval_pt/utils.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from typing import Dict, Optional, Union
3
+
4
+ from lmeval_pt import instructions_registry
5
+
6
+
7
+ @dataclasses.dataclass
8
+ class InputExample:
9
+ key: int
10
+ instruction_id_list: list[str]
11
+ prompt: str
12
+ kwargs: list[Dict[str, Optional[Union[str, int]]]]
13
+
14
+
15
+ @dataclasses.dataclass
16
+ class OutputExample:
17
+ instruction_id_list: list[str]
18
+ prompt: str
19
+ response: str
20
+ follow_all_instructions: bool
21
+ follow_instruction_list: list[bool]
22
+
23
+
24
+ def test_instruction_following_strict(
25
+ inp,
26
+ response,
27
+ ):
28
+ """Tests response to see if instructions are followed."""
29
+ instruction_list = inp.instruction_id_list
30
+ is_following_list = []
31
+
32
+ for index, instruction_id in enumerate(instruction_list):
33
+ instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
34
+ instruction = instruction_cls(instruction_id)
35
+
36
+ # Remove None values from kwargs to avoid unexpected keyword argument errors in build_description method.
37
+ kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
38
+ instruction.build_description(**kwargs)
39
+ args = instruction.get_instruction_args()
40
+ if args and "prompt" in args:
41
+ instruction.build_description(prompt=inp.prompt)
42
+
43
+ if response.strip() and instruction.check_following(response):
44
+ is_following_list.append(True)
45
+ else:
46
+ is_following_list.append(False)
47
+
48
+ return OutputExample(
49
+ instruction_id_list=inp.instruction_id_list,
50
+ prompt=inp.prompt,
51
+ response=response,
52
+ follow_all_instructions=all(is_following_list),
53
+ follow_instruction_list=is_following_list,
54
+ )
55
+
56
+
57
+ def test_instruction_following_loose(
58
+ inp,
59
+ response,
60
+ ):
61
+ """Tests response for an upper bound for following instructions."""
62
+ r = response.split("\n")
63
+ response_remove_first = "\n".join(r[1:]).strip()
64
+ response_remove_last = "\n".join(r[:-1]).strip()
65
+ response_remove_both = "\n".join(r[1:-1]).strip()
66
+ revised_response = response.replace("*", "")
67
+ revised_response_remove_first = response_remove_first.replace("*", "")
68
+ revised_response_remove_last = response_remove_last.replace("*", "")
69
+ revised_response_remove_both = response_remove_both.replace("*", "")
70
+ all_responses = [
71
+ response,
72
+ revised_response,
73
+ response_remove_first,
74
+ response_remove_last,
75
+ response_remove_both,
76
+ revised_response_remove_first,
77
+ revised_response_remove_last,
78
+ revised_response_remove_both,
79
+ ]
80
+ instruction_list = inp.instruction_id_list
81
+ is_following_list = []
82
+
83
+ for index, instruction_id in enumerate(instruction_list):
84
+ instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
85
+ instruction = instruction_cls(instruction_id)
86
+
87
+ # Remove None values from kwargs to avoid unexpected keyword argument errors in build_description method.
88
+ kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
89
+ instruction.build_description(**kwargs)
90
+ args = instruction.get_instruction_args()
91
+ if args and "prompt" in args:
92
+ instruction.build_description(prompt=inp.prompt)
93
+
94
+ is_following = False
95
+ for r in all_responses:
96
+ if r.strip() and instruction.check_following(r):
97
+ is_following = True
98
+ break
99
+
100
+ is_following_list.append(is_following)
101
+
102
+ return OutputExample(
103
+ instruction_id_list=inp.instruction_id_list,
104
+ prompt=inp.prompt,
105
+ response=response,
106
+ follow_all_instructions=all(is_following_list),
107
+ follow_instruction_list=is_following_list,
108
+ )
109
+
110
+
111
+ def process_results(doc, results):
112
+ inp = InputExample(
113
+ key=doc["key"],
114
+ instruction_id_list=doc["instruction_id_list"],
115
+ prompt=doc["prompt"],
116
+ kwargs=doc["kwargs"],
117
+ )
118
+ response = results[0]
119
+
120
+ out_strict = test_instruction_following_strict(inp, response)
121
+ out_loose = test_instruction_following_loose(inp, response)
122
+
123
+ return {
124
+ "prompt_level_strict_acc": out_strict.follow_all_instructions,
125
+ "inst_level_strict_acc": out_strict.follow_instruction_list,
126
+ "prompt_level_loose_acc": out_loose.follow_all_instructions,
127
+ "inst_level_loose_acc": out_loose.follow_instruction_list,
128
+ }
129
+
130
+
131
+ def agg_inst_level_acc(items):
132
+ flat_items = [item for sublist in items for item in sublist]
133
+ inst_level_acc = sum(flat_items) / len(flat_items)
134
+ return inst_level_acc
synthgen/PtPersonaIFGen/main_ifeval_code/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # vLLM Pipeline for IFEval-like Data Generation
2
+ # Replication of distilabel pipeline using vLLM's OpenAI-compatible API
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (172 Bytes). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/config.cpython-311.pyc ADDED
Binary file (2.22 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/filter_ifeval_data_pt.cpython-311.pyc ADDED
Binary file (12 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/json_schemas_pt.cpython-311.pyc ADDED
Binary file (5.44 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/phase1_magpie.cpython-311.pyc ADDED
Binary file (23.5 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/phase2_detection.cpython-311.pyc ADDED
Binary file (8.03 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/phase3_extraction.cpython-311.pyc ADDED
Binary file (8.59 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/phase4_postprocess.cpython-311.pyc ADDED
Binary file (11 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/run_pipeline.cpython-311.pyc ADDED
Binary file (9.44 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/system_prompts_pt.cpython-311.pyc ADDED
Binary file (49.6 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/__pycache__/utils.cpython-311.pyc ADDED
Binary file (5.87 kB). View file
 
synthgen/PtPersonaIFGen/main_ifeval_code/config.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Configuração compartilhada e constantes para o pipeline vLLM.
3
+ Referencia pipeline/system_prompts_pt.py e pipeline/json_schemas_pt.py
4
+ """
5
+
6
+ import os
7
+
8
+ from main_ifeval_code.system_prompts_pt import (
9
+ IFEVAL_SYSTEM_PROMPT,
10
+ IFEVAL_INSTRUCTION_ID_LIST_ASSIGNATOR_SYSTEM_PROMPT,
11
+ IFEVAL_KWARGS_ASSIGNATOR_SYSTEM_PROMPT,
12
+ )
13
+ from main_ifeval_code.json_schemas_pt import (
14
+ IFEVAL_INSTRUCTION_ID_LIST_JSON_SCHEMA,
15
+ IFEVAL_RESPONSE_VERIFICATION_FUNCTION_ARGUMENTS_JSON_SCHEMA,
16
+ )
17
+
18
+ # -----------------------------------------------------------------------
19
+ # CONFIGURAÇÃO DO SERVIDOR VLLM
20
+ # -----------------------------------------------------------------------
21
+ VLLM_BASE_URL = os.environ.get("VLLM_BASE_URL", "http://localhost:8021/v1")
22
+ VLLM_API_KEY = os.environ.get("VLLM_API_KEY", "no-key-needed")
23
+
24
+ # -----------------------------------------------------------------------
25
+ # CONFIGURAÇÃO DA FONTE DE PERSONAS
26
+ # -----------------------------------------------------------------------
27
+ # Dataset de personas (Persona Hub - Chan et al., 2024)
28
+ PERSONA_DATASET_ID = os.environ.get("PERSONA_DATASET_ID", "proj-persona/PersonaHub")
29
+ PERSONA_DATASET_SUBSET = os.environ.get("PERSONA_DATASET_SUBSET", "persona")
30
+ # PERSONA_DATASET_ID = os.environ.get("PERSONA_DATASET_ID", "nvidia/Nemotron-Personas-Brazil")
31
+ # PERSONA_DATASET_SUBSET = os.environ.get("PERSONA_DATASET_SUBSET", "default")
32
+ PERSONA_TEXT_FIELD = os.environ.get("PERSONA_TEXT_FIELD", "persona")
33
+ # -----------------------------------------------------------------------
34
+ # PARÂMETROS DE GERAÇÃO (de pipeline/pipeline.py)
35
+ # -----------------------------------------------------------------------
36
+
37
+ # Fase 1: Geração em duas etapas
38
+ PHASE1_INSTRUCTION_TEMPERATURE = 1.2 # Maior para instruções diversas
39
+ PHASE1_RESPONSE_TEMPERATURE = 0.7 # Menor para respostas precisas
40
+ PHASE1_TOP_P = 0.8
41
+ PHASE1_MAX_TOKENS_INSTRUCTION = 2048 # Instruções do usuário são mais curtas
42
+ PHASE1_MAX_TOKENS_RESPONSE = 2048 # Respostas podem ser mais longas
43
+
44
+ # Fase 2: Detecção de restrições (linhas 207-209 de pipeline.py)
45
+ PHASE2_TEMPERATURE = 0.2
46
+ PHASE2_MAX_TOKENS = 1024
47
+
48
+ # Fase 3: Extração de parâmetros (linhas 229-231 de pipeline.py)
49
+ PHASE3_TEMPERATURE = 0.2
50
+ PHASE3_MAX_TOKENS = 2048
51
+
52
+ # -----------------------------------------------------------------------
53
+ # CONFIGURAÇÕES DE LOTE
54
+ # -----------------------------------------------------------------------
55
+ DEFAULT_BATCH_SIZE = 4 # Requisições concorrentes por lote
56
+ DEFAULT_NUM_ROWS = 2000000 # Total de linhas a gerar (de pipeline.py linha 194)
57
+
58
+ # -----------------------------------------------------------------------
59
+ # ARQUIVOS DE SAÍDA
60
+ # -----------------------------------------------------------------------
61
+ PHASE1_OUTPUT = "phase1_generated_output.jsonl"
62
+ PHASE2_OUTPUT = "phase2_detection_output.jsonl"
63
+ PHASE3_OUTPUT = "phase3_extraction_output.jsonl"
64
+ PHASE4_OUTPUT = "phase4_final_output.jsonl"
65
+
66
+ # -----------------------------------------------------------------------
67
+ # RE-EXPORTAR PROMPTS E SCHEMAS
68
+ # -----------------------------------------------------------------------
69
+ __all__ = [
70
+ # Prompts
71
+ "IFEVAL_SYSTEM_PROMPT",
72
+ "IFEVAL_INSTRUCTION_ID_LIST_ASSIGNATOR_SYSTEM_PROMPT",
73
+ "IFEVAL_KWARGS_ASSIGNATOR_SYSTEM_PROMPT",
74
+ "IFEVAL_CONSTRAINT_PHRASINGS",
75
+ # Schemas
76
+ "IFEVAL_INSTRUCTION_ID_LIST_JSON_SCHEMA",
77
+ "IFEVAL_RESPONSE_VERIFICATION_FUNCTION_ARGUMENTS_JSON_SCHEMA",
78
+ # Config
79
+ "VLLM_BASE_URL",
80
+ "VLLM_API_KEY",
81
+ "PERSONA_DATASET_ID",
82
+ "PERSONA_DATASET_SUBSET",
83
+ "PERSONA_TEXT_FIELD",
84
+ "PHASE1_INSTRUCTION_TEMPERATURE",
85
+ "PHASE1_RESPONSE_TEMPERATURE",
86
+ "PHASE1_TOP_P",
87
+ "PHASE1_MAX_TOKENS_INSTRUCTION",
88
+ "PHASE1_MAX_TOKENS_RESPONSE",
89
+ "PHASE2_TEMPERATURE",
90
+ "PHASE2_MAX_TOKENS",
91
+ "PHASE3_TEMPERATURE",
92
+ "PHASE3_MAX_TOKENS",
93
+ "DEFAULT_BATCH_SIZE",
94
+ "DEFAULT_NUM_ROWS",
95
+ "PHASE1_OUTPUT",
96
+ "PHASE2_OUTPUT",
97
+ "PHASE3_OUTPUT",
98
+ "PHASE4_OUTPUT",
99
+ ]
synthgen/PtPersonaIFGen/main_ifeval_code/filter_ifeval_data_pt.py ADDED
@@ -0,0 +1,499 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from typing import TYPE_CHECKING
3
+
4
+ from datasets import load_dataset
5
+ from lmeval_pt.utils import process_results
6
+
7
+ if TYPE_CHECKING:
8
+ from datasets import Dataset
9
+
10
+
11
# Maps each IFEval instruction id to the ordered list of kwarg names its
# verification function expects. An empty list means the instruction takes
# no arguments. Used by build_instruction_kwargs to project the flat model
# output into per-instruction kwargs dicts.
INSTRUCTION_ARGS = {
    "keywords:existence": ["keywords"],
    "keywords:frequency": ["keyword", "frequency", "relation"],
    "keywords:forbidden_words": ["forbidden_words"],
    "keywords:letter_frequency": ["letter", "let_frequency", "let_relation"],
    "language:response_language": ["language"],
    "length_constraints:number_sentences": ["num_sentences", "relation"],
    "length_constraints:number_paragraphs": ["num_paragraphs"],
    "length_constraints:number_words": ["num_words", "relation"],
    "length_constraints:nth_paragraph_first_word": [
        "num_paragraphs",
        "nth_paragraph",
        "first_word",
    ],
    "detectable_content:number_placeholders": ["num_placeholders"],
    "detectable_content:postscript": ["postscript_marker"],
    "detectable_format:number_bullet_lists": ["num_bullets"],
    "detectable_format:constrained_response": [],
    "detectable_format:number_highlighted_sections": ["num_highlights"],
    "detectable_format:multiple_sections": ["section_spliter", "num_sections"],
    "detectable_format:json_format": [],
    "detectable_format:title": [],
    "combination:two_responses": [],
    "combination:repeat_prompt": ["prompt_to_repeat"],
    "startend:end_checker": ["end_phrase"],
    "change_case:capital_word_frequency": ["capital_frequency", "capital_relation"],
    "change_case:english_capital": [],
    "change_case:english_lowercase": [],
    "punctuation:no_comma": [],
    "startend:quotation": [],
    # --- IFTrain out-of-distribution constraints ---
    "keywords:word_once": ["keyword"],
    "keywords:word_count_diff_numb": ["keyword", "count"],
    "keywords:exclude_word_harder": ["keyword"],
    "keywords:letter_counting2": ["letter", "count"],
    "length_constraints:paragraphs_two_line_breaks": ["num_paragraphs"],
    "startend:first_word_sent": ["first_word"],
    "startend:first_word_answer": ["first_word"],
    "startend:last_word_sent": ["last_word"],
    "startend:last_word_answer": ["last_word"],
    "detectable_format:bigram_wrapping": [],
    "combination:copying_simple": ["prompt_to_copy"],
    "combination:copying_multiple": ["prompt_to_copy", "copy_count"],
    "punctuation:no_dot": [],
    "punctuation:no_exclamation": [],
    "keywords:lowercase_counting": ["max_count"],
    "length_constraints:letter_total_count": ["num_letters", "relation"],
    "length_constraints:counting_composition": ["n_sent", "n_words"],
    "keywords:count_unique": [],
    "keywords:count_increment_word": ["keyword1", "keyword2"],
    "keywords:palindrome": [],
    "keywords:keyword_specific_pos": ["keyword", "sentence_num", "word_num"],
    "startend:start_end_same_word": [],
    "combination:repeat_phrase": ["phrase", "repeat_count"],
    "keywords:no_adjacent_consecutive": [],
    "detectable_format:square_brackets_word": [],
    "detectable_format:sentence_hyphens": [],
    "combination:copy_instruction": ["instruction_to_copy"],
    "combination:copy_span_idx": ["source_text", "n_start", "n_end"],
}
71
+
72
# Flat set of every kwarg name that appears in INSTRUCTION_ARGS.
# NOTE(review): not referenced by the functions visible in this file —
# presumably consumed elsewhere (e.g. schema validation); confirm before removal.
ALL_ARGUMENTS = {
    "keywords",
    "keyword",
    "frequency",
    "relation",
    "forbidden_words",
    "letter",
    "let_frequency",
    "let_relation",
    "language",
    "num_sentences",
    "num_paragraphs",
    "num_words",
    "nth_paragraph",
    "first_word",
    "num_placeholders",
    "postscript_marker",
    "num_bullets",
    "num_highlights",
    "section_spliter",
    "num_sections",
    "prompt_to_repeat",
    "end_phrase",
    "capital_frequency",
    "capital_relation",
    # --- IFTrain out-of-distribution arguments ---
    "count",
    "last_word",
    "prompt_to_copy",
    "copy_count",
    "max_count",
    "num_letters",
    "n_sent",
    "n_words",
    "keyword1",
    "keyword2",
    "sentence_num",
    "word_num",
    "phrase",
    "repeat_count",
    "instruction_to_copy",
    "source_text",
    "n_start",
    "n_end",
}
117
+
118
# Maps each instruction id to the set of instruction ids it conflicts with.
# Every instruction conflicts with itself (no duplicates allowed in one prompt).
# filter_not_valid_rows drops any row whose instruction_id_list contains a
# pair of mutually conflicting ids.
IFEVAL_INSTRUCTION_CONFLICTS = {
    "keywords:existence": {"keywords:existence"},
    "keywords:frequency": {"keywords:frequency"},
    "keywords:forbidden_words": {"keywords:forbidden_words"},
    "keywords:letter_frequency": {"keywords:letter_frequency"},
    "language:response_language": {
        "change_case:english_capital",
        "change_case:english_lowercase",
        "startend:end_checker",
        "keywords:frequency",
        "keywords:forbidden_words",
        "detectable_format:multiple_sections",
        "keywords:existence",
        "language:response_language",
    },
    "length_constraints:number_sentences": {"length_constraints:number_sentences"},
    "length_constraints:number_paragraphs": {
        "length_constraints:number_sentences",
        "length_constraints:nth_paragraph_first_word",
        "length_constraints:number_paragraphs",
    },
    "length_constraints:number_words": {"length_constraints:number_words"},
    "length_constraints:nth_paragraph_first_word": {
        "length_constraints:nth_paragraph_first_word",
        "length_constraints:number_paragraphs",
    },
    "detectable_content:number_placeholders": {
        "detectable_content:number_placeholders"
    },
    "detectable_content:postscript": {"detectable_content:postscript"},
    "detectable_format:number_bullet_lists": {"detectable_format:number_bullet_lists"},
    # Constrained-response conflicts with essentially every other format/length
    # constraint, since the response must be one of a few fixed strings.
    "detectable_format:constrained_response": {
        "startend:quotation",
        "length_constraints:number_words",
        "detectable_format:constrained_response",
        "change_case:english_capital",
        "startend:end_checker",
        "keywords:forbidden_words",
        "length_constraints:number_sentences",
        "combination:repeat_prompt",
        "combination:two_responses",
        "punctuation:no_comma",
        "detectable_format:number_highlighted_sections",
        "change_case:english_lowercase",
        "detectable_format:number_bullet_lists",
        "detectable_content:number_placeholders",
        "keywords:letter_frequency",
        "keywords:frequency",
        "length_constraints:number_paragraphs",
        "keywords:existence",
        "length_constraints:nth_paragraph_first_word",
        "detectable_format:title",
        "change_case:capital_word_frequency",
        "detectable_format:json_format",
        "detectable_format:multiple_sections",
        "detectable_content:postscript",
        "language:response_language",
    },
    "detectable_format:number_highlighted_sections": {
        "detectable_format:number_highlighted_sections"
    },
    "detectable_format:multiple_sections": {
        "detectable_format:multiple_sections",
        "detectable_format:number_highlighted_sections",
        "language:response_language",
    },
    "detectable_format:json_format": {
        "startend:quotation",
        "length_constraints:number_words",
        "detectable_format:constrained_response",
        "change_case:english_capital",
        "detectable_format:number_bullet_lists",
        "detectable_content:number_placeholders",
        "startend:end_checker",
        "keywords:letter_frequency",
        "keywords:frequency",
        "length_constraints:number_paragraphs",
        "length_constraints:nth_paragraph_first_word",
        "length_constraints:number_sentences",
        "language:response_language",
        "combination:repeat_prompt",
        "detectable_format:title",
        "change_case:capital_word_frequency",
        "combination:two_responses",
        "detectable_format:json_format",
        "punctuation:no_comma",
        "detectable_format:number_highlighted_sections",
        "detectable_format:multiple_sections",
        "detectable_content:postscript",
        "change_case:english_lowercase",
    },
    "detectable_format:title": {"detectable_format:title"},
    "combination:two_responses": {
        "startend:quotation",
        "length_constraints:number_words",
        "detectable_format:constrained_response",
        "change_case:english_capital",
        "detectable_format:number_bullet_lists",
        "detectable_content:number_placeholders",
        "startend:end_checker",
        "keywords:letter_frequency",
        "keywords:frequency",
        "length_constraints:number_paragraphs",
        "length_constraints:nth_paragraph_first_word",
        "length_constraints:number_sentences",
        "combination:repeat_prompt",
        "change_case:capital_word_frequency",
        "combination:two_responses",
        "detectable_format:json_format",
        "detectable_format:number_highlighted_sections",
        "detectable_format:multiple_sections",
        "detectable_content:postscript",
        "change_case:english_lowercase",
    },
    "combination:repeat_prompt": {
        "startend:quotation",
        "length_constraints:number_words",
        "detectable_format:constrained_response",
        "change_case:english_capital",
        "detectable_format:number_bullet_lists",
        "detectable_content:number_placeholders",
        "startend:end_checker",
        "keywords:letter_frequency",
        "keywords:forbidden_words",
        "keywords:frequency",
        "length_constraints:number_paragraphs",
        "length_constraints:nth_paragraph_first_word",
        "length_constraints:number_sentences",
        "language:response_language",
        "combination:repeat_prompt",
        "change_case:capital_word_frequency",
        "combination:two_responses",
        "detectable_format:json_format",
        "detectable_format:number_highlighted_sections",
        "detectable_format:multiple_sections",
        "detectable_content:postscript",
        "change_case:english_lowercase",
    },
    "startend:end_checker": {"startend:end_checker"},
    "change_case:capital_word_frequency": {
        "change_case:english_capital",
        "change_case:capital_word_frequency",
        "change_case:english_lowercase",
    },
    "change_case:english_capital": {"change_case:english_capital"},
    "change_case:english_lowercase": {
        "change_case:english_capital",
        "change_case:english_lowercase",
    },
    "punctuation:no_comma": {"punctuation:no_comma"},
    "startend:quotation": {"startend:quotation", "detectable_format:title"},
    # --- IFTrain out-of-distribution constraints ---
    "keywords:word_once": {"keywords:word_once"},
    "keywords:word_count_diff_numb": {"keywords:word_count_diff_numb"},
    "keywords:exclude_word_harder": {"keywords:exclude_word_harder"},
    "keywords:letter_counting2": {"keywords:letter_counting2", "keywords:letter_frequency"},
    "length_constraints:paragraphs_two_line_breaks": {
        "length_constraints:paragraphs_two_line_breaks",
        "length_constraints:number_paragraphs",
        "length_constraints:nth_paragraph_first_word",
        "length_constraints:counting_composition",
    },
    "startend:first_word_sent": {
        "startend:first_word_sent",
        "startend:first_word_answer",
    },
    "startend:first_word_answer": {
        "startend:first_word_answer",
        "startend:first_word_sent",
    },
    "startend:last_word_sent": {
        "startend:last_word_sent",
        "startend:last_word_answer",
        "startend:start_end_same_word",
    },
    "startend:last_word_answer": {
        "startend:last_word_answer",
        "startend:last_word_sent",
        "startend:start_end_same_word",
    },
    "detectable_format:bigram_wrapping": {"detectable_format:bigram_wrapping"},
    # All combination:* copy/repeat constraints are mutually exclusive.
    "combination:copying_simple": {
        "combination:copying_simple",
        "combination:copying_multiple",
        "combination:copy_instruction",
        "combination:copy_span_idx",
        "combination:repeat_prompt",
        "combination:two_responses",
        "combination:repeat_phrase",
    },
    "combination:copying_multiple": {
        "combination:copying_multiple",
        "combination:copying_simple",
        "combination:copy_instruction",
        "combination:copy_span_idx",
        "combination:repeat_prompt",
        "combination:two_responses",
        "combination:repeat_phrase",
    },
    "punctuation:no_dot": {
        "punctuation:no_dot",
        "length_constraints:number_sentences",
        "length_constraints:counting_composition",
    },
    "punctuation:no_exclamation": {"punctuation:no_exclamation"},
    "keywords:lowercase_counting": {"keywords:lowercase_counting"},
    "length_constraints:letter_total_count": {"length_constraints:letter_total_count"},
    "length_constraints:counting_composition": {
        "length_constraints:counting_composition",
        "length_constraints:number_paragraphs",
        "length_constraints:paragraphs_two_line_breaks",
        "length_constraints:number_sentences",
        "punctuation:no_dot",
    },
    "keywords:count_unique": {"keywords:count_unique"},
    "keywords:count_increment_word": {"keywords:count_increment_word"},
    "keywords:palindrome": {"keywords:palindrome"},
    "keywords:keyword_specific_pos": {"keywords:keyword_specific_pos"},
    "startend:start_end_same_word": {
        "startend:start_end_same_word",
        "startend:last_word_sent",
        "startend:last_word_answer",
    },
    "combination:repeat_phrase": {
        "combination:repeat_phrase",
        "combination:copying_simple",
        "combination:copying_multiple",
        "combination:copy_instruction",
        "combination:copy_span_idx",
    },
    "keywords:no_adjacent_consecutive": {"keywords:no_adjacent_consecutive"},
    "detectable_format:square_brackets_word": {
        "detectable_format:square_brackets_word",
        "detectable_content:number_placeholders",
    },
    "detectable_format:sentence_hyphens": {"detectable_format:sentence_hyphens"},
    "combination:copy_instruction": {
        "combination:copy_instruction",
        "combination:copying_simple",
        "combination:copying_multiple",
        "combination:copy_span_idx",
        "combination:repeat_prompt",
        "combination:two_responses",
        "combination:repeat_phrase",
    },
    "combination:copy_span_idx": {
        "combination:copy_span_idx",
        "combination:copying_simple",
        "combination:copying_multiple",
        "combination:copy_instruction",
        "combination:repeat_prompt",
        "combination:two_responses",
        "combination:repeat_phrase",
    },
}
373
+
374
# English language name -> ISO 639-1 code, used to normalize the model's
# `language` kwarg into the code form expected by the lm-eval IFEval checker.
LANGUAGE_TO_CODE = {
    "English": "en",
    "Spanish": "es",
    "Portuguese": "pt",
    "Arabic": "ar",
    "Hindi": "hi",
    "French": "fr",
    "Russian": "ru",
    "German": "de",
    "Japanese": "ja",
    "Italian": "it",
    "Bengali": "bn",
    "Ukrainian": "uk",
    "Thai": "th",
    "Urdu": "ur",
    "Tamil": "ta",
    "Telugu": "te",
    "Bulgarian": "bg",
    "Korean": "ko",
    "Polish": "pl",
    "Hebrew": "he",
    "Persian": "fa",
    "Vietnamese": "vi",
    "Nepali": "ne",
    "Swahili": "sw",
    "Kannada": "kn",
    "Marathi": "mr",
    "Gujarati": "gu",
    "Punjabi": "pa",
    "Malayalam": "ml",
    "Finnish": "fi",
}
406
+
407
# Portuguese relation phrases -> English equivalents expected by lm-eval's
# IFEval verification functions (applied to relation/let_relation/capital_relation).
RELATION_PT_TO_EN = {
    "menos de": "less than",
    "pelo menos": "at least",
}
411
+
412
def build_instruction_kwargs(row: dict) -> dict:
    """Build the per-instruction list of ``kwargs`` for ``instruction_id_list``.

    Parses the model-produced ``row["kwargs"]`` JSON (a flat dict of all
    possible arguments), then projects it into one dict per instruction id,
    keeping only the arguments that instruction expects (see INSTRUCTION_ARGS).
    Language names are normalized to ISO codes and Portuguese relation phrases
    to the English forms lm-eval expects.

    Returns:
        ``{"kwargs": <json list>, "valid_kwargs_json": True}`` on success, or
        ``{"valid_kwargs_json": False}`` when the row cannot be used (missing
        or malformed JSON, non-dict payload, unknown instruction id, missing
        argument, or unrecognized language).
    """
    raw_kwargs = row["kwargs"]
    if raw_kwargs is None:
        return {"valid_kwargs_json": False}

    try:
        kwargs = json.loads(raw_kwargs)
    except json.JSONDecodeError:
        return {"valid_kwargs_json": False}

    # The model may emit valid JSON that is not an object (e.g. a list);
    # treat that the same as malformed JSON instead of raising in map().
    if not isinstance(kwargs, dict):
        return {"valid_kwargs_json": False}

    kwargs_list = []
    for instruction_id in row["instruction_id_list"]:
        args = INSTRUCTION_ARGS.get(instruction_id)
        if args is None:
            # Unknown instruction id: we cannot know which kwargs it needs.
            return {"valid_kwargs_json": False}
        instruction_kwargs = {}
        for arg in args:
            if arg not in kwargs:
                # Required argument missing from the model output.
                return {"valid_kwargs_json": False}
            value = kwargs[arg]
            if arg == "language":
                if value in LANGUAGE_TO_CODE:
                    value = LANGUAGE_TO_CODE[value]
                elif value not in LANGUAGE_TO_CODE.values():
                    # Neither a known language name nor an ISO code.
                    return {"valid_kwargs_json": False}
            # Normalize Portuguese relation values to English for lm-eval
            elif arg in ("relation", "let_relation", "capital_relation"):
                value = RELATION_PT_TO_EN.get(value, value)
            instruction_kwargs[arg] = value
        kwargs_list.append(instruction_kwargs)

    return {"kwargs": json.dumps(kwargs_list), "valid_kwargs_json": True}
444
+
445
+
446
def filter_not_valid_rows(row: dict) -> bool:
    """Return False for rows with invalid kwargs JSON or with instructions
    in ``instruction_id_list`` that conflict with one another."""
    if not row["valid_kwargs_json"]:
        return False

    ids = row["instruction_id_list"]
    id_set = set(ids)
    for instruction_id in ids:
        # Conflicts other than the instruction itself that also appear in the list.
        others = IFEVAL_INSTRUCTION_CONFLICTS[instruction_id] - {instruction_id}
        if others & id_set:
            return False

    return True
464
+
465
+
466
def get_ifeval_results(row: dict) -> dict:
    """Score ``row["response"]`` with the IFEval benchmark code taken from
    `lm-evaluation-harness` (``process_results``).

    On any checker error, every accuracy field is marked failed and the error
    message is kept under ``validation_error``.
    NOTE(review): the success branch returns whatever ``process_results``
    yields, which presumably lacks ``validation_error`` — confirm the
    datasets schema tolerates this.
    """
    doc = dict(row)
    doc["kwargs"] = json.loads(doc["kwargs"])
    candidate_responses = [row["response"]]
    try:
        return process_results(doc, candidate_responses)
    except Exception as exc:
        return {
            "prompt_level_strict_acc": False,
            "inst_level_strict_acc": [],
            "prompt_level_loose_acc": False,
            "inst_level_loose_acc": [],
            "validation_error": str(exc),  # Capture the actual error
        }
481
+
482
+
483
+ def get_dataset() -> "Dataset":
484
+ dataset = load_dataset("argilla/ifeval-like-data", split="train")
485
+ dataset = dataset.map(build_instruction_kwargs)
486
+ dataset = dataset.filter(filter_not_valid_rows)
487
+ dataset = dataset.add_column("key", list(range(len(dataset))))
488
+ dataset = dataset.rename_column("instruction", "prompt")
489
+ dataset = dataset.select_columns(
490
+ ["key", "prompt", "response", "instruction_id_list", "kwargs"]
491
+ )
492
+ dataset = dataset.map(get_ifeval_results)
493
+ dataset = dataset.filter(lambda x: x["prompt_level_strict_acc"])
494
+ return dataset
495
+
496
+
497
+ if __name__ == "__main__":
498
+ dataset = get_dataset()
499
+ dataset.push_to_hub("argilla/ifeval-like-data", config_name="filtered")
synthgen/PtPersonaIFGen/main_ifeval_code/json_schemas_pt.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# JSON Schema for the flat kwargs object the extractor model must emit:
# every known argument is required, but almost all allow null so the model
# can explicitly mark arguments that do not apply.
# NOTE(review): "keywords" is the only property without null in its type —
# confirm whether that asymmetry is intentional.
IFEVAL_RESPONSE_VERIFICATION_FUNCTION_ARGUMENTS_JSON_SCHEMA = {
    "type": "object",
    "properties": {
        "language": {
            "type": ["string", "null"],
        },
        "num_sentences": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        # Relation values accept both English and Portuguese phrasings;
        # they are normalized to English downstream.
        "relation": {
            "type": ["string", "null"],
            "enum": ["less than", "at least", "menos de", "pelo menos"],
        },
        "num_placeholders": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "num_bullets": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "starter": {
            "type": ["string", "null"],
        },
        "num_highlights": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "section_spliter": {
            "type": ["string", "null"],
            "enum": ["Section", "SECTION", "Seção", "SEÇÃO"],
        },
        "num_sections": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "num_paragraphs": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "postscript_marker": {
            "type": ["string", "null"],
            "enum": ["P.S.", "P.P.S"],
        },
        "original_message": {
            "type": ["string", "null"],
        },
        "keywords": {
            "type": "array",
            "items": {"type": "string"},
        },
        "keyword": {
            "type": ["string", "null"],
        },
        "frequency": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "num_words": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "nth_paragraph": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "first_word": {
            "type": ["string", "null"],
        },
        "key_sentences": {
            "type": ["array", "null"],
            "items": {"type": "string"},
        },
        "forbidden_words": {
            "type": ["array", "null"],
            "items": {"type": "string"},
        },
        "original_paragraph": {
            "type": ["string", "null"],
        },
        "low": {
            "type": ["integer", "null"],
            "minimum": 0,
        },
        "high": {
            "type": ["integer", "null"],
            "minimum": 0,
        },
        "prompt_to_repeat": {
            "type": ["string", "null"],
        },
        "end_phrase": {
            "type": ["string", "null"],
        },
        # Single ASCII letter (used by letter-frequency/counting constraints).
        "letter": {
            "type": ["string", "null"],
            "minLength": 1,
            "maxLength": 1,
            "pattern": "[a-zA-Z]",
        },
        "let_frequency": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "let_relation": {
            "type": ["string", "null"],
            "enum": ["less than", "at least", "menos de", "pelo menos"],
        },
        "capital_frequency": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "capital_relation": {
            "type": ["string", "null"],
            "enum": ["less than", "at least", "menos de", "pelo menos"],
        },
        # --- IFTrain out-of-distribution arguments ---
        "count": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "last_word": {
            "type": ["string", "null"],
        },
        "prompt_to_copy": {
            "type": ["string", "null"],
        },
        "copy_count": {
            "type": ["integer", "null"],
            "minimum": 2,
        },
        "max_count": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "num_letters": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "n_sent": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "n_words": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "keyword1": {
            "type": ["string", "null"],
        },
        "keyword2": {
            "type": ["string", "null"],
        },
        "sentence_num": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "word_num": {
            "type": ["integer", "null"],
            "minimum": 1,
        },
        "phrase": {
            "type": ["string", "null"],
        },
        "repeat_count": {
            "type": ["integer", "null"],
            "minimum": 2,
        },
        "instruction_to_copy": {
            "type": ["string", "null"],
        },
        "source_text": {
            "type": ["string", "null"],
        },
        "n_start": {
            "type": ["integer", "null"],
            "minimum": 0,
        },
        "n_end": {
            "type": ["integer", "null"],
            "minimum": 0,
        },
    },
    # All properties are required; null signals "not applicable".
    "required": [
        "language",
        "num_sentences",
        "relation",
        "num_placeholders",
        "num_bullets",
        "starter",
        "num_highlights",
        "section_spliter",
        "num_sections",
        "num_paragraphs",
        "postscript_marker",
        "original_message",
        "keywords",
        "keyword",
        "frequency",
        "num_words",
        "nth_paragraph",
        "first_word",
        "key_sentences",
        "forbidden_words",
        "original_paragraph",
        "low",
        "high",
        "prompt_to_repeat",
        "end_phrase",
        "letter",
        "let_frequency",
        "let_relation",
        "capital_frequency",
        "capital_relation",
        "count",
        "last_word",
        "prompt_to_copy",
        "copy_count",
        "max_count",
        "num_letters",
        "n_sent",
        "n_words",
        "keyword1",
        "keyword2",
        "sentence_num",
        "word_num",
        "phrase",
        "repeat_count",
        "instruction_to_copy",
        "source_text",
        "n_start",
        "n_end",
    ],
    "additionalProperties": False,
}
236
+
237
# JSON Schema constraining the detector model's output to an object with a
# single "instruction_id_list" array whose items are known instruction ids.
IFEVAL_INSTRUCTION_ID_LIST_JSON_SCHEMA = {
    "type": "object",
    "properties": {
        "instruction_id_list": {
            "type": "array",
            "items": {
                "type": "string",
                # Original IFEval ids followed by IFTrain out-of-distribution ids.
                "enum": [
                    "keywords:existence",
                    "keywords:frequency",
                    "keywords:forbidden_words",
                    "keywords:letter_frequency",
                    "language:response_language",
                    "length_constraints:number_sentences",
                    "length_constraints:number_paragraphs",
                    "length_constraints:number_words",
                    "length_constraints:nth_paragraph_first_word",
                    "detectable_content:number_placeholders",
                    "detectable_content:postscript",
                    "detectable_format:number_bullet_lists",
                    "detectable_format:constrained_response",
                    "detectable_format:number_highlighted_sections",
                    "detectable_format:multiple_sections",
                    "detectable_format:json_format",
                    "detectable_format:title",
                    "combination:two_responses",
                    "combination:repeat_prompt",
                    "startend:end_checker",
                    "change_case:capital_word_frequency",
                    "change_case:english_capital",
                    "change_case:english_lowercase",
                    "punctuation:no_comma",
                    "startend:quotation",
                    "keywords:word_once",
                    "keywords:word_count_diff_numb",
                    "keywords:exclude_word_harder",
                    "keywords:letter_counting2",
                    "length_constraints:paragraphs_two_line_breaks",
                    "startend:first_word_sent",
                    "startend:first_word_answer",
                    "startend:last_word_sent",
                    "startend:last_word_answer",
                    "detectable_format:bigram_wrapping",
                    "combination:copying_simple",
                    "combination:copying_multiple",
                    "punctuation:no_dot",
                    "punctuation:no_exclamation",
                    "keywords:lowercase_counting",
                    "length_constraints:letter_total_count",
                    "length_constraints:counting_composition",
                    "keywords:count_unique",
                    "keywords:count_increment_word",
                    "keywords:palindrome",
                    "keywords:keyword_specific_pos",
                    "startend:start_end_same_word",
                    "combination:repeat_phrase",
                    "keywords:no_adjacent_consecutive",
                    "detectable_format:square_brackets_word",
                    "detectable_format:sentence_hyphens",
                    "combination:copy_instruction",
                    "combination:copy_span_idx",
                ],
            },
        }
    },
    "required": ["instruction_id_list"],
}
synthgen/PtPersonaIFGen/main_ifeval_code/phase1_magpie.py ADDED
@@ -0,0 +1,537 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Phase 1: Persona-Driven Instruction + Response Generation
3
+
4
+ Generates IFEval-style training data using a persona-driven two-step approach,
5
+ following the TULU 3 / Persona Hub methodology (Chan et al., 2024).
6
+
7
+ Constraint selection is PROGRAMMATIC: constraints are cycled through in order
8
+ (round-robin), so each constraint gets equal coverage across the dataset.
9
+ The cycle restarts from the beginning after the last constraint is used.
10
+
11
+ Step 1: Generate User Instruction (persona-conditioned + constraint-assigned)
12
+ - A constraint is selected deterministically based on item_id % len(constraints)
13
+ - System prompt: a simple instruction-generation prompt
14
+ - User prompt: the persona + the exact constraint phrasing to embed
15
+ - Model generates a natural user instruction that includes the given constraint
16
+
17
+ Step 2: Generate Assistant Response
18
+ - System prompt: IFEVAL_SYSTEM_PROMPT (constraint-following instructions)
19
+ - User prompt: The generated instruction from Step 1
20
+ - Model generates a response that follows the constraints
21
+
22
+ This approach:
23
+ - Guarantees uniform constraint coverage across the dataset
24
+ - Provides behavioral/perspective diversity through ~250K personas
25
+ - Ensures constraint consistency between instruction and response
26
+ """
27
+
28
+ import argparse
29
+ import asyncio
30
+ from typing import Optional, Dict, Any, Iterator
31
+
32
+ from openai import AsyncOpenAI
33
+ from tqdm import tqdm
34
+
35
+ from main_ifeval_code.config import (
36
+ VLLM_BASE_URL,
37
+ VLLM_API_KEY,
38
+ PERSONA_DATASET_ID,
39
+ PERSONA_DATASET_SUBSET,
40
+ PERSONA_TEXT_FIELD,
41
+ PHASE1_INSTRUCTION_TEMPERATURE,
42
+ PHASE1_RESPONSE_TEMPERATURE,
43
+ PHASE1_TOP_P,
44
+ PHASE1_MAX_TOKENS_INSTRUCTION,
45
+ PHASE1_MAX_TOKENS_RESPONSE,
46
+ DEFAULT_BATCH_SIZE,
47
+ DEFAULT_NUM_ROWS,
48
+ PHASE1_OUTPUT,
49
+ )
50
+ from main_ifeval_code.utils import get_last_processed_id, write_jsonl_batch
51
+ from main_ifeval_code.system_prompts_pt import get_response_system_prompt
52
+
53
+
54
# -----------------------------------------------------------------------
# ASYNC CLIENT
# -----------------------------------------------------------------------
# Single module-level AsyncOpenAI client pointed at the vLLM server
# (OpenAI-compatible endpoint); shared by all concurrent requests.
client = AsyncOpenAI(
    base_url=VLLM_BASE_URL,
    api_key=VLLM_API_KEY,
)
61
+
62
+
63
# -----------------------------------------------------------------------
# CONSTRAINT SCHEDULE
# Constraints are assigned round-robin: item_id % len(CONSTRAINT_SCHEDULE).
# -----------------------------------------------------------------------
# Ordered list of constraint IDs to cycle through (round-robin).
# No phrasings here — the model should generate a natural instruction that
# implies the constraint, not copy-paste a canned phrasing.
CONSTRAINT_SCHEDULE = [
    # ---- Original IFEval constraints ----
    "language:response_language",
    "length_constraints:number_sentences",
    "detectable_content:number_placeholders",
    "detectable_format:number_bullet_lists",
    "detectable_format:constrained_response",
    "detectable_format:number_highlighted_sections",
    "detectable_format:multiple_sections",
    "length_constraints:number_paragraphs",
    "detectable_content:postscript",
    "keywords:existence",
    "keywords:frequency",
    "length_constraints:number_words",
    "detectable_format:json_format",
    "keywords:forbidden_words",
    "combination:two_responses",
    "startend:end_checker",
    "detectable_format:title",
    "keywords:letter_frequency",
    "change_case:english_capital",
    "change_case:english_lowercase",
    "punctuation:no_comma",
    "change_case:capital_word_frequency",
    "startend:quotation",
    "combination:repeat_prompt",
    "length_constraints:nth_paragraph_first_word",
    # ---- IFTrain out-of-distribution constraints ----
    "keywords:word_once",
    "keywords:word_count_diff_numb",
    "keywords:exclude_word_harder",
    "keywords:letter_counting2",
    "length_constraints:paragraphs_two_line_breaks",
    "startend:first_word_sent",
    "startend:first_word_answer",
    "startend:last_word_sent",
    "startend:last_word_answer",
    "detectable_format:bigram_wrapping",
    "combination:copying_simple",
    "combination:copying_multiple",
    "punctuation:no_dot",
    "punctuation:no_exclamation",
    "keywords:lowercase_counting",
    "length_constraints:letter_total_count",
    "length_constraints:counting_composition",
    "keywords:count_unique",
    "keywords:count_increment_word",
    "keywords:palindrome",
    "keywords:keyword_specific_pos",
    "startend:start_end_same_word",
    "combination:repeat_phrase",
    "keywords:no_adjacent_consecutive",
    "detectable_format:square_brackets_word",
    "detectable_format:sentence_hyphens",
    "combination:copy_instruction",
    "combination:copy_span_idx",
]
128
+
129
+ # Natural-language descriptions of what each constraint requires the *response* to do.
130
+ # Used to guide the instruction-generation model — it reads this description and writes
131
+ # a realistic user request that would naturally call for such a constraint in the answer.
132
+ CONSTRAINT_DESCRIPTIONS: dict[str, str] = {
133
+ "language:response_language": "A resposta deve estar inteiramente em um idioma específico.",
134
+ "length_constraints:number_sentences": "A resposta deve conter um número específico de frases.",
135
+ "detectable_content:number_placeholders": "A resposta deve incluir um certo número de placeholders entre colchetes, como [nome] ou [endereço].",
136
+ "detectable_format:number_bullet_lists": "A resposta deve conter um número específico de itens em lista com marcadores markdown.",
137
+ "detectable_format:constrained_response": "A resposta deve ser exatamente uma de algumas opções predefinidas.",
138
+ "detectable_format:number_highlighted_sections": "A resposta deve destacar um número específico de seções usando markdown.",
139
+ "detectable_format:multiple_sections": "A resposta deve ser dividida em um número específico de seções nomeadas.",
140
+ "length_constraints:number_paragraphs": "A resposta deve conter um número específico de parágrafos separados por um divisor markdown.",
141
+ "detectable_content:postscript": "A resposta deve terminar com um post-scriptum explícito (P.S. ou P.P.S).",
142
+ "keywords:existence": "A resposta deve incluir certas palavras-chave específicas.",
143
+ "keywords:frequency": "Uma palavra específica deve aparecer um número mínimo ou máximo de vezes na resposta.",
144
+ "length_constraints:number_words": "A resposta deve ter um número mínimo ou máximo de palavras.",
145
+ "detectable_format:json_format": "A resposta deve estar inteiramente em formato JSON válido.",
146
+ "keywords:forbidden_words": "A resposta não deve conter certas palavras específicas.",
147
+ "combination:two_responses": "A resposta deve conter duas respostas distintas separadas por asteriscos.",
148
+ "startend:end_checker": "A resposta deve terminar com uma frase exata e específica.",
149
+ "detectable_format:title": "A resposta deve incluir um título entre colchetes angulares duplos.",
150
+ "keywords:letter_frequency": "Uma letra específica deve aparecer um número mínimo ou máximo de vezes na resposta.",
151
+ "change_case:english_capital": "A resposta deve estar inteiramente em letras maiúsculas.",
152
+ "change_case:english_lowercase": "A resposta deve estar inteiramente em letras minúsculas.",
153
+ "punctuation:no_comma": "A resposta não deve conter vírgulas.",
154
+ "change_case:capital_word_frequency": "Um número específico de palavras na resposta deve estar em letras maiúsculas.",
155
+ "startend:quotation": "A resposta deve estar envolvida por aspas duplas.",
156
+ "combination:repeat_prompt": "A resposta deve começar repetindo o pedido original do usuário.",
157
+ "length_constraints:nth_paragraph_first_word": "Um parágrafo específico da resposta deve começar com uma palavra determinada.",
158
+ "keywords:word_once": "Uma palavra específica deve aparecer pelo menos uma vez na resposta.",
159
+ "keywords:word_count_diff_numb": "Uma palavra específica deve aparecer exatamente N vezes na resposta.",
160
+ "keywords:exclude_word_harder": "Uma palavra específica não deve aparecer em nenhum lugar da resposta.",
161
+ "keywords:letter_counting2": "Uma letra específica deve aparecer exatamente N vezes na resposta.",
162
+ "length_constraints:paragraphs_two_line_breaks": "A resposta deve ter parágrafos separados por duas quebras de linha.",
163
+ "startend:first_word_sent": "A primeira palavra de cada frase da resposta deve ser uma palavra específica.",
164
+ "startend:first_word_answer": "A primeira palavra da resposta deve ser uma palavra específica.",
165
+ "startend:last_word_sent": "A última palavra de cada frase da resposta deve ser uma palavra específica.",
166
+ "startend:last_word_answer": "A última palavra da resposta deve ser uma palavra específica.",
167
+ "detectable_format:bigram_wrapping": "Cada par de palavras consecutivas na resposta deve estar entre colchetes angulares duplos.",
168
+ "combination:copying_simple": "A resposta deve ser uma cópia exata do pedido, sem responder ao conteúdo.",
169
+ "combination:copying_multiple": "A resposta deve repetir o pedido várias vezes, separado por asteriscos, sem responder ao conteúdo.",
170
+ "punctuation:no_dot": "A resposta não deve conter pontos finais.",
171
+ "punctuation:no_exclamation": "A resposta não deve conter pontos de exclamação.",
172
+ "keywords:lowercase_counting": "Cada palavra em minúsculas na resposta pode aparecer no máximo N vezes.",
173
+ "length_constraints:letter_total_count": "A resposta deve conter um número mínimo ou máximo de letras no total.",
174
+ "length_constraints:counting_composition": "A resposta deve ter exatamente 3 parágrafos, cada um com um número fixo de frases e palavras por frase.",
175
+ "keywords:count_unique": "A resposta deve usar apenas palavras únicas — nenhuma palavra pode se repetir.",
176
+ "keywords:count_increment_word": "Duas palavras específicas devem aparecer com frequências diferentes: uma vez e duas vezes respectivamente.",
177
+ "keywords:palindrome": "A resposta deve incluir um palíndromo.",
178
+ "keywords:keyword_specific_pos": "Uma palavra específica deve aparecer em uma posição exata dentro de uma frase específica.",
179
+ "startend:start_end_same_word": "A resposta deve começar e terminar com a mesma palavra.",
180
+ "combination:repeat_phrase": "Uma frase específica deve ser repetida várias vezes, com uma palavra diferente substituída a cada vez.",
181
+ "keywords:no_adjacent_consecutive": "Nenhuma palavra adjacente na resposta pode começar com letras consecutivas do alfabeto.",
182
+ "detectable_format:square_brackets_word": "Cada palavra da resposta deve estar entre colchetes.",
183
+ "detectable_format:sentence_hyphens": "As frases da resposta devem ser conectadas por hífens, sem espaços.",
184
+ "combination:copy_instruction": "A resposta deve ser uma cópia literal da instrução recebida, sem segui-la.",
185
+ "combination:copy_span_idx": "A resposta deve conter um trecho específico do texto original, identificado por índices de caracteres.",
186
+ }
187
+
188
def get_constraint_for_item(item_id: int) -> str:
    """
    Map an item id to its constraint id.

    Assignment is round-robin: ids walk through CONSTRAINT_SCHEDULE in
    order and wrap around once the schedule is exhausted.
    """
    position = item_id % len(CONSTRAINT_SCHEDULE)
    return CONSTRAINT_SCHEDULE[position]
194
+
195
+
196
+ # -----------------------------------------------------------------------
197
+ # PERSONA PROVIDER
198
+ # -----------------------------------------------------------------------
199
+ class PersonaProvider:
200
+ """
201
+ Streams personas from the Persona Hub dataset (Chan et al., 2024).
202
+ Each call to get_persona() returns a single persona string.
203
+ The dataset is streamed lazily and restarts when exhausted.
204
+ """
205
+
206
+ def __init__(
207
+ self,
208
+ dataset_id: str,
209
+ subset: str,
210
+ text_field: str,
211
+ ):
212
+ self.dataset_id = dataset_id
213
+ self.subset = subset
214
+ self.text_field = text_field
215
+ self._iterator: Optional[Iterator] = None
216
+ self._current_index: int = -1
217
+
218
+ def _get_iterator(self) -> Iterator:
219
+ """Lazy-load the dataset iterator."""
220
+ if self._iterator is None:
221
+ from datasets import load_dataset
222
+ ds = load_dataset(
223
+ self.dataset_id,
224
+ self.subset,
225
+ split="train",
226
+ streaming=True,
227
+ )
228
+ self._iterator = iter(ds)
229
+ return self._iterator
230
+
231
+ def get_persona(self) -> Dict[str, Any]:
232
+ """
233
+ Get the next persona from the dataset.
234
+ Returns a dict with 'persona' (str) and 'persona_index' (int).
235
+ Automatically restarts the iterator if the dataset is exhausted.
236
+ """
237
+ iterator = self._get_iterator()
238
+
239
+ while True:
240
+ try:
241
+ record = next(iterator)
242
+ self._current_index += 1
243
+ except StopIteration:
244
+ # Restart when exhausted (allows generating more rows than personas)
245
+ self._iterator = None
246
+ self._current_index = -1
247
+ iterator = self._get_iterator()
248
+ record = next(iterator)
249
+ self._current_index += 1
250
+
251
+ persona_text = record.get(self.text_field, "")
252
+ if not isinstance(persona_text, str) or not persona_text.strip():
253
+ continue
254
+
255
+ return {
256
+ "persona": persona_text.strip(),
257
+ "persona_index": self._current_index,
258
+ }
259
+
260
+
261
def get_persona_provider() -> PersonaProvider:
    """Build a PersonaProvider wired to the configured persona dataset."""
    provider = PersonaProvider(
        dataset_id=PERSONA_DATASET_ID,
        subset=PERSONA_DATASET_SUBSET,
        text_field=PERSONA_TEXT_FIELD,
    )
    return provider
268
+
269
+
270
+ # -----------------------------------------------------------------------
271
+ # STEP 1: GENERATE USER INSTRUCTION (persona-conditioned, constraint-assigned)
272
+ # -----------------------------------------------------------------------
273
+
274
+ _INSTRUCTION_GENERATOR_SYSTEM_PROMPT = (
275
+ "Você é um gerador de dados de treino para modelos de linguagem. "
276
+ "Você recebe uma persona e uma restrição de formatação, e escreve uma instrução "
277
+ "realista em português que alguém faria a um assistente de IA."
278
+ )
279
+
280
+
281
def build_instruction_messages(persona: str, constraint_id: str) -> list:
    """
    Assemble the chat messages for the instruction-generation LLM call.

    The system prompt is a minimal role definition; the user prompt carries
    the persona, the natural-language description of the constraint, and the
    task itself. The model is expected to write a realistic user request that
    implies the constraint, choosing concrete parameter values (words,
    numbers, letters, phrases) that fit the persona and topic.
    """
    constraint_description = CONSTRAINT_DESCRIPTIONS[constraint_id]

    task_prompt = (
        f"Persona: {persona}\n\n"
        f"Restrição que a resposta do assistente deve seguir: {constraint_description}\n\n"
        f"Escreva uma pergunta ou pedido realista que essa persona faria a um assistente de IA. "
        f"A instrução deve pedir algo concreto e útil, e deve incluir a restrição de forma natural no final — "
        f"com valores específicos escolhidos por você (palavras, números, letras, frases) que façam sentido "
        f"para o contexto. Use a persona apenas como inspiração para o tópico e tom, não force referências a ela."
    )

    system_message = {"role": "system", "content": _INSTRUCTION_GENERATOR_SYSTEM_PROMPT}
    user_message = {"role": "user", "content": task_prompt}
    return [system_message, user_message]
308
+
309
+
310
async def generate_user_instruction(
    model_id: str,
    persona: str,
    constraint_id: str,
) -> Optional[str]:
    """
    Generate a user instruction conditioned on a persona and a pre-selected
    constraint. Returns the stripped instruction text, or None when the call
    fails or the model produces empty content.
    """
    messages = build_instruction_messages(persona, constraint_id)

    try:
        print("Printing user instruction: ", messages)
        completion = await client.chat.completions.create(
            model=model_id,
            messages=messages,
            temperature=PHASE1_INSTRUCTION_TEMPERATURE,
            top_p=PHASE1_TOP_P,
            max_tokens=PHASE1_MAX_TOKENS_INSTRUCTION,
        )
        text = completion.choices[0].message.content
        if not text:
            return None
        return text.strip()
    except Exception as e:
        # Best-effort: a failed generation is dropped, not retried.
        print(f"Error generating instruction: {e}")
        return None
334
+
335
+
336
+ # -----------------------------------------------------------------------
337
+ # STEP 2: GENERATE ASSISTANT RESPONSE
338
+ # -----------------------------------------------------------------------
339
+ async def generate_response(
340
+ model_id: str,
341
+ instruction: str,
342
+ constraint_id: str,
343
+ ) -> Optional[str]:
344
+ """
345
+ Generate an assistant response for the given instruction.
346
+
347
+ Uses a focused per-constraint system prompt so the model only sees
348
+ the one rule it must follow, not all 53.
349
+
350
+ Args:
351
+ model_id: The vLLM model ID
352
+ instruction: The user instruction (with embedded constraint)
353
+ constraint_id: The constraint ID used to select the system prompt
354
+
355
+ Returns:
356
+ An assistant response that follows the constraint
357
+ """
358
+ messages = [
359
+ {"role": "system", "content": get_response_system_prompt(constraint_id)},
360
+ {"role": "user", "content": instruction},
361
+ ]
362
+
363
+ try:
364
+ print("Printing user instruction: ", messages)
365
+ resp = await client.chat.completions.create(
366
+ model=model_id,
367
+ messages=messages,
368
+ temperature=PHASE1_RESPONSE_TEMPERATURE,
369
+ top_p=PHASE1_TOP_P,
370
+ max_tokens=PHASE1_MAX_TOKENS_RESPONSE,
371
+ )
372
+ response = resp.choices[0].message.content
373
+ return response.strip() if response else None
374
+ except Exception as e:
375
+ print(f"Error generating response: {e}")
376
+ return None
377
+
378
+
379
+ # -----------------------------------------------------------------------
380
+ # COMBINED: GENERATE INSTRUCTION-RESPONSE PAIR
381
+ # -----------------------------------------------------------------------
382
+ async def generate_instruction_response_pair(
383
+ model_id: str,
384
+ item_id: int,
385
+ persona_provider: PersonaProvider,
386
+ ) -> Optional[Dict[str, Any]]:
387
+ """
388
+ Generate a complete instruction-response pair.
389
+
390
+ The constraint is selected deterministically from CONSTRAINT_SCHEDULE
391
+ using item_id % len(CONSTRAINT_SCHEDULE), cycling in order.
392
+
393
+ Returns:
394
+ Dict with id, instruction, response, persona, persona_index,
395
+ constraint_id, and constraint_index.
396
+ """
397
+ # Determine constraint for this item (round-robin)
398
+ constraint_id = get_constraint_for_item(item_id)
399
+ constraint_index = item_id % len(CONSTRAINT_SCHEDULE)
400
+
401
+ # Sample a persona
402
+ persona_data = persona_provider.get_persona()
403
+ persona = persona_data["persona"]
404
+ persona_index = persona_data["persona_index"]
405
+
406
+ # Step 1: Generate user instruction conditioned on persona + constraint
407
+ instruction = await generate_user_instruction(model_id, persona, constraint_id)
408
+ if not instruction:
409
+ return None
410
+
411
+ # Step 2: Generate assistant response (focused system prompt for this constraint)
412
+ response = await generate_response(model_id, instruction, constraint_id)
413
+ if not response:
414
+ return None
415
+
416
+ return {
417
+ "id": item_id,
418
+ "instruction": instruction,
419
+ "response": response,
420
+ "persona": persona,
421
+ "persona_index": persona_index,
422
+ "constraint_id": constraint_id,
423
+ "constraint_index": constraint_index,
424
+ }
425
+
426
+
427
+ # -----------------------------------------------------------------------
428
+ # BATCH PROCESSING
429
+ # -----------------------------------------------------------------------
430
+ async def process_batch(
431
+ model_id: str,
432
+ start_id: int,
433
+ batch_size: int,
434
+ persona_provider: PersonaProvider,
435
+ ) -> list[Dict[str, Any]]:
436
+ """Process a batch of generations concurrently."""
437
+ tasks = [
438
+ generate_instruction_response_pair(model_id, start_id + i, persona_provider)
439
+ for i in range(batch_size)
440
+ ]
441
+ results = await asyncio.gather(*tasks)
442
+ # Filter out None results
443
+ return [r for r in results if r is not None]
444
+
445
+
446
+ # -----------------------------------------------------------------------
447
+ # MAIN
448
+ # -----------------------------------------------------------------------
449
+ async def main(
450
+ output_file: str = PHASE1_OUTPUT,
451
+ num_rows: int = DEFAULT_NUM_ROWS,
452
+ batch_size: int = DEFAULT_BATCH_SIZE,
453
+ ):
454
+ # Discover model
455
+ try:
456
+ models_resp = await client.models.list()
457
+ model_id = models_resp.data[0].id
458
+ print(f"Using model: {model_id}")
459
+ except Exception as e:
460
+ print(f"Could not list models. Is vLLM running? Error: {e}")
461
+ return
462
+
463
+ # Initialize persona provider
464
+ print(f"Persona source: {PERSONA_DATASET_ID}/{PERSONA_DATASET_SUBSET}")
465
+ persona_provider = get_persona_provider()
466
+
467
+ print(
468
+ f"Constraint schedule: {len(CONSTRAINT_SCHEDULE)} constraints, cycling in order.\n"
469
+ f" First: {CONSTRAINT_SCHEDULE[0][0]}\n"
470
+ f" Last: {CONSTRAINT_SCHEDULE[-1][0]}"
471
+ )
472
+
473
+ # Resume from last processed ID
474
+ last_id = get_last_processed_id(output_file)
475
+ start_from_id = last_id + 1
476
+
477
+ if start_from_id > 0:
478
+ resume_constraint_id = get_constraint_for_item(start_from_id)[0]
479
+ print(f"Resuming from ID: {start_from_id} (constraint: {resume_constraint_id})")
480
+ else:
481
+ print("Starting from scratch.")
482
+
483
+ # Calculate remaining rows
484
+ remaining_rows = num_rows - start_from_id
485
+ if remaining_rows <= 0:
486
+ print(f"Already generated {num_rows} rows. Nothing to do.")
487
+ return
488
+
489
+ print(f"Generating {remaining_rows} persona-driven instruction-response pairs...")
490
+
491
+ # Process in batches
492
+ current_id = start_from_id
493
+ pbar = tqdm(total=remaining_rows, initial=0, desc="Generating")
494
+
495
+ while current_id < num_rows:
496
+ # Calculate batch size for this iteration
497
+ this_batch_size = min(batch_size, num_rows - current_id)
498
+
499
+ # Generate batch
500
+ results = await process_batch(
501
+ model_id, current_id, this_batch_size, persona_provider
502
+ )
503
+
504
+ # Write results
505
+ if results:
506
+ write_jsonl_batch(output_file, results)
507
+ pbar.update(len(results))
508
+
509
+ current_id += this_batch_size
510
+
511
+ pbar.close()
512
+ print(f"Phase 1 complete. Output: {output_file}")
513
+
514
+
515
if __name__ == "__main__":
    # CLI entry point: output path, total row count, and concurrency batch
    # size, all defaulting to the values from main_ifeval_code.config.
    parser = argparse.ArgumentParser(
        description="Phase 1: Persona-Driven Instruction + Response Generation"
    )
    parser.add_argument(
        "--output", default=PHASE1_OUTPUT,
        help="Output JSONL file"
    )
    parser.add_argument(
        "--num-rows", type=int, default=DEFAULT_NUM_ROWS,
        help="Number of rows to generate"
    )
    parser.add_argument(
        "--batch-size", type=int, default=DEFAULT_BATCH_SIZE,
        help="Batch size for concurrent requests"
    )
    args = parser.parse_args()

    asyncio.run(main(
        output_file=args.output,
        num_rows=args.num_rows,
        batch_size=args.batch_size,
    ))
synthgen/PtPersonaIFGen/main_ifeval_code/phase2_detection.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Phase 2: Constraint Type Detection
3
+
4
+ Replicates IFEvalInstructionIdListAssignator from pipeline/ifeval_tasks.py (lines 14-40).
5
+ Takes each instruction and classifies which of the 25 IFEval constraint types are present.
6
+
7
+ System Prompt: pipeline/system_prompts.py -> IFEVAL_INSTRUCTION_ID_LIST_ASSIGNATOR_SYSTEM_PROMPT (lines 55-93)
8
+ JSON Schema: pipeline/json_schemas.py -> IFEVAL_INSTRUCTION_ID_LIST_JSON_SCHEMA (lines 154-193)
9
+ Generation Params: pipeline/pipeline.py (lines 207-209)
10
+
11
+ Input Format (from format_input, ifeval_tasks.py lines 19-28):
12
+ messages = [
13
+ {"role": "system", "content": SYSTEM_PROMPT},
14
+ {"role": "user", "content": instruction}
15
+ ]
16
+ """
17
+
18
+ import argparse
19
+ import asyncio
20
+ import json
21
+ from typing import Optional
22
+
23
+ from openai import AsyncOpenAI
24
+ from tqdm import tqdm
25
+
26
+ from main_ifeval_code.config import (
27
+ VLLM_BASE_URL,
28
+ VLLM_API_KEY,
29
+ IFEVAL_INSTRUCTION_ID_LIST_ASSIGNATOR_SYSTEM_PROMPT,
30
+ IFEVAL_INSTRUCTION_ID_LIST_JSON_SCHEMA,
31
+ PHASE2_TEMPERATURE,
32
+ PHASE2_MAX_TOKENS,
33
+ DEFAULT_BATCH_SIZE,
34
+ PHASE1_OUTPUT,
35
+ PHASE2_OUTPUT,
36
+ )
37
+ from main_ifeval_code.utils import (
38
+ get_last_processed_id,
39
+ iter_jsonl_batches,
40
+ write_jsonl_batch,
41
+ count_jsonl_lines,
42
+ )
43
+
44
+
45
+ # -----------------------------------------------------------------------
46
+ # ASYNC CLIENT
47
+ # -----------------------------------------------------------------------
48
+ client = AsyncOpenAI(
49
+ base_url=VLLM_BASE_URL,
50
+ api_key=VLLM_API_KEY,
51
+ )
52
+
53
+
54
+ # -----------------------------------------------------------------------
55
+ # MESSAGE CONSTRUCTION
56
+ # Replicates IFEvalInstructionIdListAssignator.format_input()
57
+ # from pipeline/ifeval_tasks.py lines 19-28
58
+ # -----------------------------------------------------------------------
59
+ def build_messages(instruction: str) -> list[dict]:
60
+ """
61
+ Build chat messages for constraint detection.
62
+ Exact replication of format_input() from ifeval_tasks.py.
63
+ """
64
+ return [
65
+ {
66
+ "role": "system",
67
+ "content": IFEVAL_INSTRUCTION_ID_LIST_ASSIGNATOR_SYSTEM_PROMPT,
68
+ },
69
+ {"role": "user", "content": instruction},
70
+ ]
71
+
72
+
73
+ # -----------------------------------------------------------------------
74
+ # OUTPUT PARSING
75
+ # Replicates IFEvalInstructionIdListAssignator.format_output()
76
+ # from pipeline/ifeval_tasks.py lines 34-40
77
+ # -----------------------------------------------------------------------
78
+ def parse_output(output: str | None) -> dict:
79
+ """
80
+ Parse the LLM output to extract instruction_id_list.
81
+ Exact replication of format_output() from ifeval_tasks.py.
82
+ """
83
+ if output is None:
84
+ return {"instruction_id_list": None}
85
+
86
+ try:
87
+ return json.loads(output)
88
+ except json.JSONDecodeError:
89
+ return {"instruction_id_list": None}
90
+
91
+
92
+ # -----------------------------------------------------------------------
93
+ # VLLM CALL WITH GUIDED JSON
94
+ # Based on example.py lines 146-164
95
+ # -----------------------------------------------------------------------
96
+ async def detect_constraints(
97
+ model_id: str,
98
+ item: dict,
99
+ ) -> Optional[dict]:
100
+ """
101
+ Detect constraint types in an instruction.
102
+ Uses guided JSON decoding with IFEVAL_INSTRUCTION_ID_LIST_JSON_SCHEMA.
103
+ """
104
+ instruction = item.get("instruction")
105
+ if not instruction:
106
+ return None
107
+
108
+ messages = build_messages(instruction)
109
+
110
+ try:
111
+ resp = await client.chat.completions.create(
112
+ model=model_id,
113
+ messages=messages,
114
+ temperature=PHASE2_TEMPERATURE,
115
+ max_tokens=PHASE2_MAX_TOKENS,
116
+ extra_body={"guided_json": IFEVAL_INSTRUCTION_ID_LIST_JSON_SCHEMA},
117
+ )
118
+ raw_output = resp.choices[0].message.content
119
+ parsed = parse_output(raw_output)
120
+
121
+ # Build output record
122
+ return {
123
+ "id": item["id"],
124
+ "instruction": item["instruction"],
125
+ "response": item["response"],
126
+ "instruction_id_list": parsed.get("instruction_id_list"),
127
+ }
128
+ except Exception as e:
129
+ print(f"Error detecting constraints for id={item.get('id')}: {e}")
130
+ return None
131
+
132
+
133
+ # -----------------------------------------------------------------------
134
+ # BATCH PROCESSING
135
+ # Based on example.py lines 184-195
136
+ # -----------------------------------------------------------------------
137
+ async def process_batch(
138
+ model_id: str,
139
+ batch: list[dict],
140
+ ) -> list[dict]:
141
+ """Process a batch of items concurrently."""
142
+ tasks = [detect_constraints(model_id, item) for item in batch]
143
+ results = await asyncio.gather(*tasks)
144
+ # Filter out None results
145
+ return [r for r in results if r is not None]
146
+
147
+
148
+ # -----------------------------------------------------------------------
149
+ # MAIN
150
+ # -----------------------------------------------------------------------
151
+ async def main(
152
+ input_file: str = PHASE1_OUTPUT,
153
+ output_file: str = PHASE2_OUTPUT,
154
+ batch_size: int = DEFAULT_BATCH_SIZE,
155
+ ):
156
+ # Discover model
157
+ try:
158
+ models_resp = await client.models.list()
159
+ model_id = models_resp.data[0].id
160
+ print(f"Using model: {model_id}")
161
+ except Exception as e:
162
+ print(f"Could not list models. Is vLLM running? Error: {e}")
163
+ return
164
+
165
+ # Resume from last processed ID
166
+ last_id = get_last_processed_id(output_file)
167
+ start_from_id = last_id + 1
168
+
169
+ if start_from_id > 0:
170
+ print(f"Resuming from ID: {start_from_id}")
171
+ else:
172
+ print("Starting from scratch.")
173
+
174
+ # Count total items
175
+ total_items = count_jsonl_lines(input_file)
176
+ remaining = total_items - start_from_id
177
+
178
+ if remaining <= 0:
179
+ print(f"All {total_items} items already processed. Nothing to do.")
180
+ return
181
+
182
+ print(f"Processing {remaining} items from {input_file}...")
183
+
184
+ # Process in batches
185
+ pbar = tqdm(total=remaining, initial=0, desc="Detecting constraints")
186
+
187
+ for batch in iter_jsonl_batches(
188
+ input_file,
189
+ batch_size,
190
+ start_from_id,
191
+ required_fields=["instruction", "response"],
192
+ ):
193
+ results = await process_batch(model_id, batch)
194
+
195
+ if results:
196
+ write_jsonl_batch(output_file, results)
197
+ pbar.update(len(results))
198
+
199
+ pbar.close()
200
+ print(f"Phase 2 complete. Output: {output_file}")
201
+
202
+
203
if __name__ == "__main__":
    # CLI entry point: input/output JSONL paths and concurrency batch size,
    # defaulting to the values from main_ifeval_code.config.
    parser = argparse.ArgumentParser(description="Phase 2: Constraint Detection")
    parser.add_argument("--input", default=PHASE1_OUTPUT, help="Input JSONL file from Phase 1")
    parser.add_argument("--output", default=PHASE2_OUTPUT, help="Output JSONL file")
    parser.add_argument("--batch-size", type=int, default=DEFAULT_BATCH_SIZE, help="Batch size for concurrent requests")
    args = parser.parse_args()

    asyncio.run(main(
        input_file=args.input,
        output_file=args.output,
        batch_size=args.batch_size,
    ))
synthgen/PtPersonaIFGen/main_ifeval_code/phase3_extraction.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Phase 3: Parameter Extraction
3
+
4
+ Replicates IFEvalKwargsAssignator from pipeline/ifeval_tasks.py (lines 43-70).
5
+ For each detected constraint type, extracts the specific parameter values from the instruction.
6
+
7
+ System Prompt: pipeline/system_prompts.py -> IFEVAL_KWARGS_ASSIGNATOR_SYSTEM_PROMPT (lines 95-227)
8
+ JSON Schema: pipeline/json_schemas.py -> IFEVAL_RESPONSE_VERIFICATION_FUNCTION_ARGUMENTS_JSON_SCHEMA (lines 1-152)
9
+ Generation Params: pipeline/pipeline.py (lines 229-231)
10
+
11
+ Input Format (from format_input, ifeval_tasks.py lines 48-58):
12
+ instruction_id_list_str = "\\n".join(instruction_id_list)
13
+ messages = [
14
+ {"role": "system", "content": SYSTEM_PROMPT},
15
+ {"role": "user", "content": f"## Instruction\\n\\n{instruction}## Instruction ID List\\n\\n{instruction_id_list_str}"}
16
+ ]
17
+ """
18
+
19
+ import argparse
20
+ import asyncio
21
+ import json
22
+ from typing import Optional
23
+
24
+ from openai import AsyncOpenAI
25
+ from tqdm import tqdm
26
+
27
+ from main_ifeval_code.config import (
28
+ VLLM_BASE_URL,
29
+ VLLM_API_KEY,
30
+ IFEVAL_KWARGS_ASSIGNATOR_SYSTEM_PROMPT,
31
+ IFEVAL_RESPONSE_VERIFICATION_FUNCTION_ARGUMENTS_JSON_SCHEMA,
32
+ PHASE3_TEMPERATURE,
33
+ PHASE3_MAX_TOKENS,
34
+ DEFAULT_BATCH_SIZE,
35
+ PHASE2_OUTPUT,
36
+ PHASE3_OUTPUT,
37
+ )
38
+ from main_ifeval_code.utils import (
39
+ get_last_processed_id,
40
+ iter_jsonl_batches,
41
+ write_jsonl_batch,
42
+ count_jsonl_lines,
43
+ )
44
+
45
+
46
+ # -----------------------------------------------------------------------
47
+ # ASYNC CLIENT
48
+ # -----------------------------------------------------------------------
49
+ client = AsyncOpenAI(
50
+ base_url=VLLM_BASE_URL,
51
+ api_key=VLLM_API_KEY,
52
+ )
53
+
54
+
55
+ # -----------------------------------------------------------------------
56
+ # MESSAGE CONSTRUCTION
57
+ # Replicates IFEvalKwargsAssignator.format_input()
58
+ # from pipeline/ifeval_tasks.py lines 48-58
59
+ # -----------------------------------------------------------------------
60
+ def build_messages(instruction: str, instruction_id_list: list[str]) -> list[dict]:
61
+ """
62
+ Build chat messages for parameter extraction.
63
+ Exact replication of format_input() from ifeval_tasks.py.
64
+
65
+ Note: The original code has a subtle formatting issue where there's no
66
+ newline between the instruction and "## Instruction ID List". We replicate
67
+ this exactly.
68
+ """
69
+ instruction_id_list_str = "\n".join(instruction_id_list)
70
+
71
+ return [
72
+ {"role": "system", "content": IFEVAL_KWARGS_ASSIGNATOR_SYSTEM_PROMPT},
73
+ {
74
+ "role": "user",
75
+ "content": f"## Instrução\n\n{instruction}## Lista de IDs de Instrução\n\n{instruction_id_list_str}",
76
+ },
77
+ ]
78
+
79
+
80
+ # -----------------------------------------------------------------------
81
+ # OUTPUT PARSING
82
+ # Replicates IFEvalKwargsAssignator.format_output()
83
+ # from pipeline/ifeval_tasks.py lines 64-70
84
+ # -----------------------------------------------------------------------
85
+ def parse_output(output: str | None) -> dict:
86
+ """
87
+ Parse the LLM output to extract kwargs.
88
+ Exact replication of format_output() from ifeval_tasks.py.
89
+
90
+ Note: The original stores the raw JSON string, not parsed dict.
91
+ """
92
+ if output is None:
93
+ return {"kwargs": None}
94
+
95
+ return {"kwargs": output}
96
+
97
+
98
+ # -----------------------------------------------------------------------
99
+ # VLLM CALL WITH GUIDED JSON
100
+ # Based on example.py lines 146-164
101
+ # -----------------------------------------------------------------------
102
async def extract_kwargs(
    model_id: str,
    item: dict,
) -> Optional[dict]:
    """
    Ask the model for constraint kwargs for one instruction record.

    Uses vLLM guided-JSON decoding constrained by
    IFEVAL_RESPONSE_VERIFICATION_FUNCTION_ARGUMENTS_JSON_SCHEMA.
    Returns the enriched record, or None when the item is incomplete or
    the request fails.
    """
    instruction_text = item.get("instruction")
    id_list = item.get("instruction_id_list")

    # Skip records that are missing either required field.
    if not (instruction_text and id_list):
        return None

    chat = build_messages(instruction_text, id_list)

    try:
        completion = await client.chat.completions.create(
            model=model_id,
            messages=chat,
            temperature=PHASE3_TEMPERATURE,
            max_tokens=PHASE3_MAX_TOKENS,
            extra_body={
                "guided_json": IFEVAL_RESPONSE_VERIFICATION_FUNCTION_ARGUMENTS_JSON_SCHEMA
            },
        )
        parsed = parse_output(completion.choices[0].message.content)

        # Assemble the Phase-3 output record.
        return {
            "id": item["id"],
            "instruction": item["instruction"],
            "response": item["response"],
            "instruction_id_list": item["instruction_id_list"],
            "kwargs": parsed.get("kwargs"),
        }
    except Exception as e:
        print(f"Error extracting kwargs for id={item.get('id')}: {e}")
        return None
142
+
143
+
144
+ # -----------------------------------------------------------------------
145
+ # BATCH PROCESSING
146
+ # Based on example.py lines 184-195
147
+ # -----------------------------------------------------------------------
148
async def process_batch(
    model_id: str,
    batch: list[dict],
) -> list[dict]:
    """Run extract_kwargs concurrently over *batch*, dropping failures."""
    outcomes = await asyncio.gather(
        *(extract_kwargs(model_id, entry) for entry in batch)
    )
    return [record for record in outcomes if record is not None]
157
+
158
+
159
+ # -----------------------------------------------------------------------
160
+ # MAIN
161
+ # -----------------------------------------------------------------------
162
async def main(
    input_file: str = PHASE2_OUTPUT,
    output_file: str = PHASE3_OUTPUT,
    batch_size: int = DEFAULT_BATCH_SIZE,
):
    """
    Phase 3 driver: stream Phase-2 records, extract kwargs for each via the
    LLM, and append results to *output_file*, resuming where it left off.
    """
    # Discover which model the vLLM server is serving (first listed model).
    try:
        models_resp = await client.models.list()
        model_id = models_resp.data[0].id
        print(f"Using model: {model_id}")
    except Exception as e:
        print(f"Could not list models. Is vLLM running? Error: {e}")
        return

    # Resume support: the next id to process is one past the highest id
    # already present in the output file (assumes sequential ids — the
    # same convention used by count-based `remaining` below).
    last_id = get_last_processed_id(output_file)
    start_from_id = last_id + 1

    if start_from_id > 0:
        print(f"Resuming from ID: {start_from_id}")
    else:
        print("Starting from scratch.")

    # Remaining work = total input lines minus the already-processed prefix.
    total_items = count_jsonl_lines(input_file)
    remaining = total_items - start_from_id

    if remaining <= 0:
        print(f"All {total_items} items already processed. Nothing to do.")
        return

    print(f"Processing {remaining} items from {input_file}...")

    # NOTE(review): the bar advances only for successful extractions, so it
    # may stop short of `remaining` when some requests fail.
    pbar = tqdm(total=remaining, initial=0, desc="Extracting kwargs")

    for batch in iter_jsonl_batches(
        input_file,
        batch_size,
        start_from_id,
        required_fields=["instruction", "response", "instruction_id_list"],
    ):
        results = await process_batch(model_id, batch)

        if results:
            write_jsonl_batch(output_file, results)
            pbar.update(len(results))

    pbar.close()
    print(f"Phase 3 complete. Output: {output_file}")
212
+
213
+
214
+ if __name__ == "__main__":
215
+ parser = argparse.ArgumentParser(description="Phase 3: Parameter Extraction")
216
+ parser.add_argument("--input", default=PHASE2_OUTPUT, help="Input JSONL file from Phase 2")
217
+ parser.add_argument("--output", default=PHASE3_OUTPUT, help="Output JSONL file")
218
+ parser.add_argument("--batch-size", type=int, default=DEFAULT_BATCH_SIZE, help="Batch size for concurrent requests")
219
+ args = parser.parse_args()
220
+
221
+ asyncio.run(main(
222
+ input_file=args.input,
223
+ output_file=args.output,
224
+ batch_size=args.batch_size,
225
+ ))
synthgen/PtPersonaIFGen/main_ifeval_code/phase4_postprocess.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Phase 4: Post-Processing
3
+
4
+ Reuses logic from filter_ifeval_data.py to:
5
+ 1. Restructure kwargs (flat dict -> per-instruction list)
6
+ 2. Filter conflicting constraints
7
+ 3. Validate responses with lm-eval IFEval checker
8
+ 4. Keep only samples where prompt_level_strict_acc == True
9
+
10
+ Key Functions from filter_ifeval_data.py:
11
+ - build_instruction_kwargs() (lines 257-285)
12
+ - filter_not_valid_rows() (lines 288-305)
13
+ - get_ifeval_results() (lines 308-321)
14
+
15
+ Key Data Structures from filter_ifeval_data.py:
16
+ - INSTRUCTION_ARGS (lines 11-41)
17
+ - LANGUAGE_TO_CODE (lines 223-254)
18
+ - IFEVAL_INSTRUCTION_CONFLICTS (lines 70-221)
19
+ """
20
+
21
+ import argparse
22
+ import json
23
+
24
+ from tqdm import tqdm
25
+
26
+ from main_ifeval_code.filter_ifeval_data_pt import (
27
+ INSTRUCTION_ARGS,
28
+ LANGUAGE_TO_CODE,
29
+ IFEVAL_INSTRUCTION_CONFLICTS,
30
+ build_instruction_kwargs,
31
+ filter_not_valid_rows,
32
+ get_ifeval_results,
33
+ )
34
+
35
+ from main_ifeval_code.config import (
36
+ PHASE3_OUTPUT,
37
+ PHASE4_OUTPUT,
38
+ )
39
+ from main_ifeval_code.utils import (
40
+ iter_jsonl_batches,
41
+ write_jsonl_line,
42
+ count_jsonl_lines,
43
+ )
44
+
45
+
46
+ # -----------------------------------------------------------------------
47
+ # STEP 4.1: RESTRUCTURE KWARGS
48
+ # Reuses build_instruction_kwargs() from filter_ifeval_data.py (lines 257-285)
49
+ # -----------------------------------------------------------------------
50
def restructure_kwargs(item: dict) -> dict:
    """
    Convert the flat kwargs dict into a per-instruction kwargs list.

    Delegates to build_instruction_kwargs() (filter_ifeval_data_pt) and
    copies its "kwargs" / "valid_kwargs_json" outputs back onto *item*.
    """
    outcome = build_instruction_kwargs(item)
    current_kwargs = item.get("kwargs")
    item["kwargs"] = outcome.get("kwargs", current_kwargs)
    item["valid_kwargs_json"] = outcome.get("valid_kwargs_json", False)
    return item
59
+
60
+
61
+ # -----------------------------------------------------------------------
62
+ # STEP 4.2: FILTER CONFLICTING CONSTRAINTS
63
+ # Reuses filter_not_valid_rows() from filter_ifeval_data.py (lines 288-305)
64
+ # -----------------------------------------------------------------------
65
def is_valid_row(item: dict) -> bool:
    """
    True when *item* has parseable kwargs and no conflicting constraints.

    Thin wrapper over filter_not_valid_rows() from filter_ifeval_data_pt.
    """
    verdict = filter_not_valid_rows(item)
    return verdict
71
+
72
+
73
+ # -----------------------------------------------------------------------
74
+ # STEP 4.3: VALIDATE WITH LM-EVAL
75
+ # Reuses get_ifeval_results() from filter_ifeval_data.py (lines 308-321)
76
+ # -----------------------------------------------------------------------
77
def validate_response(item: dict) -> dict:
    """
    Score *item*'s response with the lm-eval IFEval checker (in place).

    Renames "instruction" -> "prompt" and guarantees a "key" field, both
    required by lm-eval's process_results, then merges the checker's
    result fields into the record.
    """
    item["prompt"] = item.pop("instruction", item.get("prompt"))
    item.setdefault("key", item.get("id", 0))

    scores = get_ifeval_results(item)
    item.update(scores)
    return item
92
+
93
+
94
+ # -----------------------------------------------------------------------
95
+ # MAIN PROCESSING
96
+ # -----------------------------------------------------------------------
97
def process_item(item: dict) -> dict | None:
    """
    Push one record through every post-processing step.

    Returns the cleaned record when it passes all filters, else None.
    """
    record = restructure_kwargs(item)

    # Drop rows whose kwargs are invalid or whose constraints conflict.
    if not is_valid_row(record):
        return None

    # Drop rows whose response fails the strict IFEval check.
    record = validate_response(record)
    if not record.get("prompt_level_strict_acc", False):
        return None

    # The validity flag was only needed internally.
    record.pop("valid_kwargs_json", None)
    return record
121
+
122
+
123
def main(
    input_file: str = PHASE3_OUTPUT,
    output_file: str = PHASE4_OUTPUT,
    batch_size: int = 100,
):
    """
    Run post-processing on Phase 3 output.

    Streams the input JSONL, applies steps 4.1-4.4 to each record, writes
    survivors to *output_file*, then prints summary stats and a handful of
    debug samples per failure category.

    Note: This is CPU-bound (no LLM calls), so we process sequentially.
    The main bottleneck is the lm-eval validation.
    """
    # Count total items up front so the progress bar has a fixed total.
    total_items = count_jsonl_lines(input_file)
    print(f"Processing {total_items} items from {input_file}...")

    # Aggregate counters for the end-of-run summary.
    stats = {
        "total": 0,
        "invalid_kwargs": 0,
        "conflicts": 0,
        "failed_validation": 0,
        "passed": 0,
    }

    # Keep the first few failures of each kind for post-run inspection.
    DEBUG_LIMIT = 10
    debug_samples = {
        "invalid_kwargs": [],
        "conflicts": [],
        "failed_validation": [],
    }

    # NOTE(review): this loop duplicates process_item() inline so it can
    # attribute each drop to a specific stats bucket.
    pbar = tqdm(total=total_items, desc="Post-processing")

    for batch in iter_jsonl_batches(
        input_file,
        batch_size,
        start_from_id=0,
        required_fields=["instruction", "response", "instruction_id_list", "kwargs"],
    ):
        for item in batch:
            stats["total"] += 1
            item_id = item.get("id", stats["total"])

            # Step 4.1: restructure flat kwargs into per-instruction lists.
            item = restructure_kwargs(item)
            if not item.get("valid_kwargs_json", False):
                stats["invalid_kwargs"] += 1
                if len(debug_samples["invalid_kwargs"]) < DEBUG_LIMIT:
                    debug_samples["invalid_kwargs"].append({
                        "id": item_id,
                        "raw_kwargs": item.get("kwargs"),
                        "instruction_id_list": item.get("instruction_id_list"),
                    })
                pbar.update(1)
                continue

            # Step 4.2: drop rows with invalid or conflicting constraints.
            if not is_valid_row(item):
                stats["conflicts"] += 1
                if len(debug_samples["conflicts"]) < DEBUG_LIMIT:
                    debug_samples["conflicts"].append({
                        "id": item_id,
                        "instruction_id_list": item.get("instruction_id_list"),
                    })
                pbar.update(1)
                continue

            # Step 4.3: score the response with the lm-eval IFEval checker.
            item = validate_response(item)

            # Step 4.4: keep only responses that pass the strict check.
            if not item.get("prompt_level_strict_acc", False):
                stats["failed_validation"] += 1
                if len(debug_samples["failed_validation"]) < DEBUG_LIMIT:
                    debug_samples["failed_validation"].append({
                        "id": item_id,
                        "instruction_id_list": item.get("instruction_id_list"),
                        "kwargs": item.get("kwargs"),
                        "inst_level_strict_acc": item.get("inst_level_strict_acc"),
                        "validation_error": item.get("validation_error"),  # Capture exception
                        "response_preview": item.get("response", "")[:500],
                    })
                pbar.update(1)
                continue

            # The validity flag was only needed internally.
            if "valid_kwargs_json" in item:
                del item["valid_kwargs_json"]

            # Write passing item.
            write_jsonl_line(output_file, item)
            stats["passed"] += 1
            pbar.update(1)

    pbar.close()

    # Print summary.
    print("\n" + "=" * 50)
    print("Post-processing Summary")
    print("=" * 50)
    print(f"Total processed: {stats['total']:,}")
    print(f"Invalid kwargs: {stats['invalid_kwargs']:,}")
    print(f"Conflicting: {stats['conflicts']:,}")
    print(f"Failed validation: {stats['failed_validation']:,}")
    print(f"Passed (final): {stats['passed']:,}")
    print(f"Pass rate: {stats['passed']/max(stats['total'],1)*100:.1f}%")
    print("=" * 50)
    print(f"Output: {output_file}")

    # Print debug samples, one section per failure category.
    if debug_samples["invalid_kwargs"]:
        print("\n" + "=" * 50)
        print(f"DEBUG: Sample INVALID KWARGS failures (first {DEBUG_LIMIT}):")
        print("=" * 50)
        for sample in debug_samples["invalid_kwargs"]:
            print(f"\n--- ID: {sample['id']} ---")
            print(f"instruction_id_list: {sample['instruction_id_list']}")
            print(f"raw_kwargs: {sample['raw_kwargs'][:500] if sample['raw_kwargs'] else 'None'}...")

    if debug_samples["conflicts"]:
        print("\n" + "=" * 50)
        print(f"DEBUG: Sample CONFLICT failures (first {DEBUG_LIMIT}):")
        print("=" * 50)
        for sample in debug_samples["conflicts"]:
            print(f"\n--- ID: {sample['id']} ---")
            print(f"instruction_id_list: {sample['instruction_id_list']}")

    if debug_samples["failed_validation"]:
        print("\n" + "=" * 50)
        print(f"DEBUG: Sample VALIDATION failures (first {DEBUG_LIMIT}):")
        print("=" * 50)
        for sample in debug_samples["failed_validation"]:
            print(f"\n--- ID: {sample['id']} ---")
            print(f"instruction_id_list: {sample['instruction_id_list']}")
            print(f"kwargs: {sample['kwargs']}")
            print(f"inst_level_strict_acc: {sample['inst_level_strict_acc']}")
            if sample.get('validation_error'):
                print(f"EXCEPTION: {sample['validation_error']}")
            print(f"response_preview: {sample['response_preview']}...")
264
+
265
+
266
if __name__ == "__main__":
    # CLI wrapper around main(); file defaults come from main_ifeval_code.config.
    parser = argparse.ArgumentParser(description="Phase 4: Post-Processing")
    parser.add_argument("--input", default=PHASE3_OUTPUT, help="Input JSONL file from Phase 3")
    parser.add_argument("--output", default=PHASE4_OUTPUT, help="Output JSONL file")
    parser.add_argument("--batch-size", type=int, default=100, help="Batch size for reading")
    args = parser.parse_args()

    main(
        input_file=args.input,
        output_file=args.output,
        batch_size=args.batch_size,
    )
synthgen/PtPersonaIFGen/main_ifeval_code/run_pipeline.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Main Pipeline Runner
4
+
5
+ Orchestrates the complete IFEval-like data generation pipeline:
6
+ - Phase 1: Magpie generation (instruction + response pairs)
7
+ - Phase 2: Constraint detection (classify constraint types)
8
+ - Phase 3: Parameter extraction (extract kwargs for each constraint)
9
+ - Phase 4: Post-processing (validate and filter)
10
+
11
+ Usage:
12
+ # Run complete pipeline
13
+ python run_pipeline.py --num-rows 200
14
+
15
+ # Run specific phase
16
+ python run_pipeline.py --phase 2 --input phase1_output.jsonl
17
+
18
+ # Run from specific phase onwards
19
+ python run_pipeline.py --start-phase 3
20
+
21
+ Environment Variables:
22
+ VLLM_BASE_URL: vLLM server URL (default: http://localhost:8000/v1)
23
+ VLLM_API_KEY: API key (default: no-key-needed)
24
+ """
25
+
26
+ import argparse
27
+ import asyncio
28
+ import os
29
+ import sys
30
+
31
+ from main_ifeval_code.config import (
32
+ DEFAULT_BATCH_SIZE,
33
+ DEFAULT_NUM_ROWS,
34
+ PHASE1_OUTPUT,
35
+ PHASE2_OUTPUT,
36
+ PHASE3_OUTPUT,
37
+ PHASE4_OUTPUT,
38
+ )
39
+
40
+
41
async def run_phase1(num_rows: int, batch_size: int, output: str):
    """Phase 1: generate instruction + response pairs via Magpie."""
    banner = "=" * 60
    print("\n" + banner)
    print("PHASE 1: Magpie Instruction + Response Generation")
    print(banner)

    # Imported lazily so unrelated phases don't pay this module's import cost.
    from main_ifeval_code.phase1_magpie import main as phase1_main

    await phase1_main(
        output_file=output,
        num_rows=num_rows,
        batch_size=batch_size,
    )
53
+
54
+
55
async def run_phase2(input_file: str, batch_size: int, output: str):
    """Phase 2: classify which constraint types each instruction carries."""
    banner = "=" * 60
    print("\n" + banner)
    print("PHASE 2: Constraint Type Detection")
    print(banner)

    # Imported lazily so unrelated phases don't pay this module's import cost.
    from main_ifeval_code.phase2_detection import main as phase2_main

    await phase2_main(
        input_file=input_file,
        output_file=output,
        batch_size=batch_size,
    )
67
+
68
+
69
async def run_phase3(input_file: str, batch_size: int, output: str):
    """Phase 3: extract kwargs for each detected constraint."""
    banner = "=" * 60
    print("\n" + banner)
    print("PHASE 3: Parameter Extraction")
    print(banner)

    # Imported lazily so unrelated phases don't pay this module's import cost.
    from main_ifeval_code.phase3_extraction import main as phase3_main

    await phase3_main(
        input_file=input_file,
        output_file=output,
        batch_size=batch_size,
    )
81
+
82
+
83
def run_phase4(input_file: str, batch_size: int, output: str):
    """Phase 4: validate and filter records (synchronous, CPU-bound)."""
    banner = "=" * 60
    print("\n" + banner)
    print("PHASE 4: Post-Processing")
    print(banner)

    # Imported lazily so unrelated phases don't pay this module's import cost.
    from main_ifeval_code.phase4_postprocess import main as phase4_main

    phase4_main(
        input_file=input_file,
        output_file=output,
        batch_size=batch_size,
    )
95
+
96
+
97
async def run_full_pipeline(
    num_rows: int,
    batch_size: int,
    start_phase: int = 1,
    phase1_output: str = PHASE1_OUTPUT,
    phase2_output: str = PHASE2_OUTPUT,
    phase3_output: str = PHASE3_OUTPUT,
    phase4_output: str = PHASE4_OUTPUT,
):
    """Execute phases start_phase..4 in order, chaining their output files."""
    if start_phase <= 1:
        await run_phase1(num_rows, batch_size, phase1_output)
    if start_phase <= 2:
        await run_phase2(phase1_output, batch_size, phase2_output)
    if start_phase <= 3:
        await run_phase3(phase2_output, batch_size, phase3_output)
    if start_phase <= 4:
        run_phase4(phase3_output, batch_size, phase4_output)

    closing = "=" * 60
    print("\n" + closing)
    print("PIPELINE COMPLETE")
    print(closing)
    print(f"Final output: {phase4_output}")
124
+
125
+
126
async def run_single_phase(
    phase: int,
    input_file: str | None,
    output_file: str | None,
    batch_size: int,
    num_rows: int,
):
    """Dispatch to exactly one phase, defaulting missing file names."""
    if phase == 1:
        await run_phase1(num_rows, batch_size, output_file or PHASE1_OUTPUT)
        return
    if phase == 2:
        await run_phase2(
            input_file or PHASE1_OUTPUT, batch_size, output_file or PHASE2_OUTPUT
        )
        return
    if phase == 3:
        await run_phase3(
            input_file or PHASE2_OUTPUT, batch_size, output_file or PHASE3_OUTPUT
        )
        return
    if phase == 4:
        run_phase4(
            input_file or PHASE3_OUTPUT, batch_size, output_file or PHASE4_OUTPUT
        )
        return

    # Unreachable via the CLI (argparse restricts choices); kept as a guard.
    print(f"Invalid phase: {phase}. Must be 1-4.")
    sys.exit(1)
157
+
158
+
159
def main():
    """Parse CLI arguments and launch either one phase or the pipeline."""
    parser = argparse.ArgumentParser(
        description="IFEval-like Data Generation Pipeline",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Run complete pipeline with 10k rows
  python run_pipeline.py --num-rows 10000

  # Run only Phase 2 with custom input
  python run_pipeline.py --phase 2 --input my_phase1.jsonl

  # Resume pipeline from Phase 3
  python run_pipeline.py --start-phase 3

  # Run Phase 1 with larger batch size
  python run_pipeline.py --phase 1 --batch-size 100 --num-rows 50000
        """
    )

    # --phase runs exactly one phase; otherwise --start-phase runs that
    # phase and every phase after it.
    parser.add_argument(
        "--phase",
        type=int,
        choices=[1, 2, 3, 4],
        help="Run only this specific phase"
    )
    parser.add_argument(
        "--start-phase",
        type=int,
        default=1,
        choices=[1, 2, 3, 4],
        help="Start from this phase (runs all subsequent phases)"
    )
    parser.add_argument(
        "--input",
        type=str,
        help="Input file for the phase (defaults to previous phase output)"
    )
    parser.add_argument(
        "--output",
        type=str,
        help="Output file for the phase (defaults to phase-specific name)"
    )
    parser.add_argument(
        "--num-rows",
        type=int,
        default=DEFAULT_NUM_ROWS,
        help=f"Number of rows to generate in Phase 1 (default: {DEFAULT_NUM_ROWS})"
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=DEFAULT_BATCH_SIZE,
        help=f"Batch size for concurrent requests (default: {DEFAULT_BATCH_SIZE})"
    )

    args = parser.parse_args()

    # Echo the effective configuration before doing any work.
    print("=" * 60)
    print("IFEval-like Data Generation Pipeline")
    print("=" * 60)
    print(f"vLLM URL: {os.environ.get('VLLM_BASE_URL', 'http://localhost:8000/v1')}")
    print(f"Batch size: {args.batch_size}")
    print(f"Num rows: {args.num_rows}")

    if args.phase:
        print(f"Running: Phase {args.phase} only")
        asyncio.run(run_single_phase(
            phase=args.phase,
            input_file=args.input,
            output_file=args.output,
            batch_size=args.batch_size,
            num_rows=args.num_rows,
        ))
    else:
        print(f"Running: Phase {args.start_phase} onwards")
        asyncio.run(run_full_pipeline(
            num_rows=args.num_rows,
            batch_size=args.batch_size,
            start_phase=args.start_phase,
        ))
241
+
242
+
243
if __name__ == "__main__":
    # Script entry point: defer all work to main() so the module stays importable.
    main()
synthgen/PtPersonaIFGen/main_ifeval_code/system_prompts_pt.py ADDED
@@ -0,0 +1,677 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ IFEVAL_SYSTEM_PROMPT = """
2
+ Você é um assistente de IA que responde às consultas dos usuários seguindo rigorosamente as instruções dadas. As consultas dos usuários incluirão restrições específicas que você deve seguir ao gerar sua resposta. Essas restrições podem incluir:
3
+
4
+ 1. Requisitos de formatação (ex.: listas com bullet points, seções destacadas, formato JSON, múltiplas seções, títulos)
5
+ 2. Especificações de conteúdo (ex.: usar placeholders numéricos, incluir um post-scriptum)
6
+ 3. Restrições de comprimento (ex.: número específico de parágrafos, palavras ou frases)
7
+ 4. Modificações de maiúsculas/minúsculas (ex.: capitalizar palavras específicas ou usar tudo em minúsculas)
8
+ 5. Uso de palavras-chave (ex.: incluir ou evitar certas palavras, manter frequências específicas de palavras)
9
+ 6. Requisitos de idioma (ex.: responder em um idioma específico)
10
+ 7. Regras de pontuação (ex.: evitar vírgulas)
11
+ 8. Padrões de início e fim (ex.: usar aspas, frases finais específicas)
12
+ 9. Restrições combinadas (ex.: repetir parte do prompt, fornecer múltiplas respostas)
13
+
14
+ Cada consulta indicará claramente as restrições que você deve seguir. Mais de uma restrição pode ser incluída por consulta do usuário. Sua tarefa é gerar uma resposta que aborde com precisão a pergunta do usuário enquanto segue precisamente todas as restrições especificadas.
15
+
16
+ Importante: Palavras entre colchetes `[...]` são placeholders. Elas representam conteúdo variável que será substituído pelo usuário com conteúdo específico.
17
+
18
+ As restrições serão formuladas de maneiras específicas, como:
19
+
20
+ - "Sua resposta INTEIRA deve estar em [idioma], nenhum outro idioma é permitido." ([idioma] pode ser "pt" para português, "en" para inglês, "es" para espanhol, etc., seguindo códigos ISO 639-1)
21
+ - "Sua resposta deve conter [relação] [num_frases] frases." ([relação] pode ser "menos de" ou "pelo menos"; [num_frases] pode ser qualquer número até 20)
22
+ - "A resposta deve conter pelo menos [num_placeholders] placeholders representados por colchetes, como [endereço]." ([num_placeholders] pode ser qualquer número até 4)
23
+ - "Sua resposta deve conter exatamente [num_marcadores] marcadores. Use os marcadores markdown como: * Este é o ponto 1." ([num_marcadores] pode ser qualquer número até 5)
24
+ - "Responda com uma das seguintes opções: [opções_resposta]" ([opções_resposta] pode ser "Minha resposta é sim.", "Minha resposta é não.", "Minha resposta é talvez.")
25
+ - "Durante a conversa, quando for sua vez, sempre comece com [início]" ([início] pode ser "Eu diria", "Minha resposta é", "Eu acredito", etc.)
26
+ - "Destaque pelo menos [num_destaques] seções em sua resposta com markdown, ou seja, *seção destacada*." ([num_destaques] pode ser qualquer número até 4)
27
+ - "Sua resposta deve ter [num_seções] seções. Marque o início de cada seção com [divisor_seção] X, como: [divisor_seção] 1" ([num_seções] pode ser qualquer número até 5; [divisor_seção] pode ser "Seção" ou "SEÇÃO")
28
+ - "Deve haver [num_parágrafos] parágrafos. Os parágrafos são separados com o divisor markdown: ***" ([num_parágrafos] pode ser qualquer número até 5)
29
+ - "No final da sua resposta, adicione explicitamente um post-scriptum começando com [postscript]" ([postscript] pode ser "P.S." ou "P.P.S")
30
+ - "Inclua as palavras-chave [palavras_chave] na resposta." ([palavras_chave] pode ser uma lista de palavras-chave geradas)
31
+ - "Na sua resposta, a palavra [palavra_chave] deve aparecer [relação] [frequência] vezes." ([palavra_chave] pode ser qualquer palavra; [relação] pode ser "menos de" ou "pelo menos"; [frequência] pode ser qualquer número até 3)
32
+ - "Responda com [relação] [num_palavras] palavras." ([relação] pode ser "menos de" ou "pelo menos"; [num_palavras] pode ser qualquer número entre 100 e 500)
33
+ - "Toda a saída deve estar em formato JSON. Você pode usar marcadores markdown como ```."
34
+ - "Não inclua as palavras-chave [palavras_proibidas] na resposta." ([palavras_proibidas] pode ser uma lista de palavras-chave geradas)
35
+ - "Dê duas respostas diferentes. As respostas e apenas as respostas devem ser separadas por 6 símbolos de asterisco: ******."
36
+ - "Termine sua resposta com esta frase exata [final]. Nenhuma outra palavra deve seguir esta frase." ([final] pode ser "Alguma outra pergunta?" ou "Posso ajudar com mais alguma coisa?")
37
+ - "Sua resposta deve conter um título, entre colchetes angulares duplos, como <<poema de alegria>>."
38
+ - "Na sua resposta, a letra [letra] deve aparecer [relação_letra] [frequência_letra] vezes." ([letra] pode ser qualquer letra; [relação_letra] pode ser "menos de" ou "pelo menos"; [frequência_letra] pode ser qualquer número até 10)
39
+ - "Sua resposta inteira deve estar em português, e toda em letras MAIÚSCULAS."
40
+ - "Sua resposta inteira deve estar em português, e toda em letras minúsculas. Nenhuma letra maiúscula é permitida."
41
+ - "Em toda a sua resposta, evite o uso de vírgulas."
42
+ - "Na sua resposta, palavras com todas as letras maiúsculas devem aparecer [relação] [frequência] vezes." ([relação] pode ser "menos de" ou "pelo menos"; [frequência] pode ser qualquer número até 20)
43
+ - "Envolva toda a sua resposta com aspas duplas."
44
+ - "Inclua a palavra-chave [palavra_chave] na sua resposta." ([palavra_chave] pode ser qualquer palavra)
45
+ - "Na sua resposta, a palavra [palavra_chave] deve aparecer exatamente [contagem] vezes." ([palavra_chave] pode ser qualquer palavra; [contagem] pode ser qualquer número de 1 a 5)
46
+ - "Não inclua a palavra-chave [palavra_chave] na resposta." ([palavra_chave] pode ser qualquer palavra da instrução)
47
+ - "Na sua resposta, a letra [letra] deve aparecer exatamente [contagem] vezes." ([letra] pode ser qualquer letra; [contagem] pode ser qualquer número até 10)
48
+ - "Deve haver [num_parágrafos] parágrafos. Os parágrafos são separados entre si por duas quebras de linha."
49
+ - "A primeira palavra de cada frase deve ser a palavra [primeira_palavra]." ([primeira_palavra] pode ser qualquer palavra)
50
+ - "A primeira palavra da sua resposta deve ser a palavra [primeira_palavra]." ([primeira_palavra] pode ser qualquer palavra)
51
+ - "A última palavra de cada frase, antes da pontuação, deve ser a palavra [última_palavra]." ([última_palavra] pode ser qualquer palavra)
52
+ - "A última palavra da sua resposta deve ser a palavra [última_palavra]." ([última_palavra] pode ser qualquer palavra)
53
+ - "Envolva cada bigrama de palavras em colchetes angulares duplos, como «eu sou» «em casa» «com meu» «cachorro fofo»."
54
+ - "Repita o pedido sem alterações (não diga nada antes de repetir o pedido; o pedido que você precisa repetir não inclui esta frase) e não responda ao pedido real!"
55
+ - "Repita o pedido sem alterações [contagem] vezes, separadas por 6 símbolos de asterisco (não diga nada antes de repetir o pedido; o pedido que você precisa repetir não inclui esta frase) e não responda ao pedido real!" ([contagem] pode ser qualquer número de 2 a 4)
56
+ - "Em toda a sua resposta, evite o uso de . (ou seja, pontos) como pontuação e em geral."
57
+ - "Em toda a sua resposta, evite o uso de ! (ou seja, pontos de exclamação) como pontuação e em geral."
58
+ - "Na sua resposta, todas as palavras em letras minúsculas devem aparecer no máximo [contagem] vezes." ([contagem] pode ser qualquer número de 1 a 10)
59
+ - "Responda com [relação] [num_letras] letras." ([relação] pode ser "menos de" ou "pelo menos"; [num_letras] pode ser qualquer número de 50 a 500)
60
+ - "Escreva 3 parágrafos, delimitados pelo divisor markdown: ***, com exatamente [num_frases] frases cada, com exatamente [num_palavras] palavras em cada frase." ([num_frases] pode ser qualquer número de 2 a 5; [num_palavras] pode ser qualquer número de 5 a 15)
61
+ - "Use apenas palavras únicas na sua resposta, nenhuma palavra deve ser repetida!"
62
+ - "Inclua a palavra-chave [palavra_chave1] uma vez na sua resposta, a palavra-chave [palavra_chave2] duas vezes na sua resposta."
63
+ - "Inclua um palíndromo na sua resposta."
64
+ - "Inclua a palavra-chave [palavra_chave] na [num_frase]-ª frase, como a [num_palavra]-ª palavra dessa frase." ([num_frase] pode ser qualquer número de 1 a 5; [num_palavra] pode ser qualquer número de 1 a 5)
65
+ - "Comece e termine sua resposta com a mesma palavra (não escreva nada após a última palavra, nem mesmo pontuação)."
66
+ - "Repita a frase \"[frase]\" exatamente [contagem] vezes, transformando-a ligeiramente cada vez substituindo uma palavra." ([contagem] pode ser qualquer número de 2 a 4)
67
+ - "Nenhuma palavra adjacente pode começar com letras consecutivas do alfabeto."
68
+ - "Envolva cada palavra da sua resposta entre colchetes."
69
+ - "Todas as frases devem ser conectadas usando hífens, sem espaços entre elas."
70
+ - "Copie esta instrução literalmente, não siga a instrução, apenas copie-a na saída (não inclua esta frase de instrução!)."
71
+ - "Copie o trecho de palavras que está entre (e incluindo) o índice [n_início] e [n_fim], os índices são índices de caracteres!" ([n_início] e [n_fim] podem ser qualquer número inteiro não negativo)
72
+
73
+ Importante: Algumas restrições são mutuamente exclusivas e não podem ser aplicadas juntas. Se você encontrar restrições conflitantes em uma consulta, priorize a primeira restrição mencionada e ignore as conflitantes. Por exemplo:
74
+
75
+ - Restrições de idioma podem conflitar com modificações de maiúsculas/minúsculas, frequências de palavras-chave e certos requisitos de formatação.
76
+ - Restrições de comprimento (número de parágrafos, frases ou palavras) podem conflitar entre si.
77
+ - Restrições de formatação como formato JSON ou respostas restritas podem conflitar com várias outras restrições.
78
+ - Restrições de modificação de maiúsculas/minúsculas podem conflitar entre si.
79
+
80
+ Sempre se esforce para seguir o máximo de restrições não conflitantes possível, mantendo a coerência e relevância da sua resposta à consulta do usuário.
81
+ """.lstrip()
82
+
83
# System prompt (Portuguese) for the "instruction-id assignator" LLM step:
# given a text, the model must return a JSON object with an
# "instruction_id_list" naming which of the 53 IFEval constraint types apply.
# NOTE(review): `{{`/`}}` are escaped braces — presumably this template is
# passed through str.format downstream; confirm against the caller.
IFEVAL_INSTRUCTION_ID_LIST_ASSIGNATOR_SYSTEM_PROMPT = """
Sua tarefa é analisar o texto fornecido em busca de restrições específicas e gerar uma lista JSON válida contendo apenas os tipos de restrições relevantes. Os tipos de restrições possíveis são:

1. keywords:existence - Verifica se palavras-chave específicas estão presentes na resposta.
2. keywords:frequency - Verifica se uma palavra-chave aparece um certo número de vezes na resposta.
3. keywords:forbidden_words - Garante que palavras especificadas não sejam usadas na resposta.
4. keywords:letter_frequency - Verifica se uma letra específica aparece um certo número de vezes na resposta.
5. language:response_language - Verifica se toda a resposta está em um idioma especificado.
6. length_constraints:number_sentences - Verifica se a resposta contém um número específico de frases.
7. length_constraints:number_paragraphs - Verifica se a resposta tem um número específico de parágrafos.
8. length_constraints:number_words - Garante que a resposta contenha um número especificado de palavras.
9. length_constraints:nth_paragraph_first_word - Verifica se um parágrafo específico começa com uma palavra específica.
10. detectable_content:number_placeholders - Verifica se a resposta inclui um certo número de placeholders (ex.: [placeholder]).
11. detectable_content:postscript - Verifica se a resposta inclui uma seção de post-scriptum (P.S.).
12. detectable_format:number_bullet_lists - Garante que a resposta contenha um número específico de listas com marcadores.
13. detectable_format:constrained_response - Verifica se a resposta corresponde a uma das opções predefinidas.
14. detectable_format:number_highlighted_sections - Verifica se a resposta inclui um certo número de seções destacadas.
15. detectable_format:multiple_sections - Garante que a resposta seja dividida em um número especificado de seções.
16. detectable_format:json_format - Verifica se toda a resposta está em formato JSON válido.
17. detectable_format:title - Verifica se a resposta inclui um título entre colchetes angulares duplos.
18. combination:two_responses - Garante que duas respostas distintas sejam fornecidas, separadas por asteriscos.
19. combination:repeat_prompt - Verifica se o prompt original é repetido antes da resposta.
20. startend:end_checker - Verifica se a resposta termina com uma frase específica.
21. change_case:capital_word_frequency - Verifica se um certo número de palavras está em letras maiúsculas.
22. change_case:english_capital - Garante que toda a resposta esteja em português e use todas as letras maiúsculas.
23. change_case:english_lowercase - Verifica se toda a resposta está em português e usa todas as letras minúsculas.
24. punctuation:no_comma - Verifica se a resposta não contém vírgulas.
25. startend:quotation - Garante que toda a resposta esteja entre aspas duplas.
26. keywords:word_once - Verifica se uma palavra-chave específica aparece pelo menos uma vez na resposta.
27. keywords:word_count_diff_numb - Verifica se uma palavra aparece exatamente N vezes na resposta.
28. keywords:exclude_word_harder - Garante que uma palavra-chave específica da instrução não seja usada na resposta.
29. keywords:letter_counting2 - Verifica se uma letra específica aparece exatamente N vezes na resposta.
30. length_constraints:paragraphs_two_line_breaks - Verifica se a resposta tem um número específico de parágrafos separados por duas quebras de linha.
31. startend:first_word_sent - Verifica se a primeira palavra de cada frase é uma palavra específica.
32. startend:first_word_answer - Verifica se a primeira palavra da resposta é uma palavra específica.
33. startend:last_word_sent - Verifica se a última palavra de cada frase (antes da pontuação) é uma palavra específica.
34. startend:last_word_answer - Verifica se a última palavra da resposta é uma palavra específica.
35. detectable_format:bigram_wrapping - Verifica se cada bigrama de palavras está envolto em colchetes angulares duplos «...».
36. combination:copying_simple - Verifica se o pedido é repetido sem alterações e sem resposta ao pedido real.
37. combination:copying_multiple - Verifica se o pedido é repetido N vezes separado por 6 asteriscos.
38. punctuation:no_dot - Verifica se a resposta não contém pontos.
39. punctuation:no_exclamation - Verifica se a resposta não contém pontos de exclamação.
40. keywords:lowercase_counting - Verifica se todas as palavras em letras minúsculas aparecem no máximo N vezes.
41. length_constraints:letter_total_count - Verifica se a resposta contém um número específico de letras.
42. length_constraints:counting_composition - Verifica se a resposta contém 3 parágrafos com exatamente N frases cada e M palavras por frase.
43. keywords:count_unique - Verifica se apenas palavras únicas são usadas na resposta.
44. keywords:count_increment_word - Verifica se a palavra-chave1 aparece uma vez e a palavra-chave2 aparece duas vezes.
45. keywords:palindrome - Verifica se a resposta inclui um palíndromo.
46. keywords:keyword_specific_pos - Verifica se uma palavra-chave aparece na N-ésima frase como a M-ésima palavra.
47. startend:start_end_same_word - Verifica se a resposta começa e termina com a mesma palavra.
48. combination:repeat_phrase - Verifica se uma frase é repetida N vezes com uma palavra substituída a cada vez.
49. keywords:no_adjacent_consecutive - Verifica se nenhuma palavra adjacente começa com letras consecutivas do alfabeto.
50. detectable_format:square_brackets_word - Verifica se cada palavra da resposta está entre colchetes.
51. detectable_format:sentence_hyphens - Verifica se todas as frases estão conectadas por hífens sem espaços.
52. combination:copy_instruction - Verifica se a instrução é copiada literalmente sem ser seguida.
53. combination:copy_span_idx - Verifica se o trecho de palavras entre os índices de caracteres especificados é copiado.

Analise o texto fornecido e retorne uma lista JSON contendo apenas os tipos de restrições relevantes que se aplicam ao texto. Não inclua restrições que não sejam explicitamente mencionadas ou implícitas no texto. Não inclua uma restrição duas vezes na lista.

Formato de saída:

```
{{
"instruction_id_list": ["<tipo_restrição_1>", "<tipo_restrição_2>", ...]
}}
```
""".lstrip()
150
+
151
# System prompt (Portuguese) for the "kwargs assignator" LLM step: given a
# constraint-id list plus an instruction, the model fills in the argument
# values for each of the 53 constraint types (null for inapplicable args)
# and emits them as a single flat JSON object. The key names mirror the
# kwargs accepted by the IFEval checker functions (keyword, frequency,
# relation, num_sentences, ...). `{{`/`}}` are escaped format braces.
IFEVAL_KWARGS_ASSIGNATOR_SYSTEM_PROMPT = """
Você receberá uma lista de restrições e uma instrução. A instrução contém restrições. Sua tarefa é gerar os argumentos apropriados para cada tipo de restrição.

## Tipos de restrições, descrições e argumentos

1. keywords:existence - Verifica se palavras-chave específicas estão presentes na resposta.
- keywords: List[str]

2. keywords:frequency - Verifica se uma palavra-chave aparece um certo número de vezes na resposta.
- keyword: str
- frequency: int
- relation: str

3. keywords:forbidden_words - Garante que palavras especificadas não sejam usadas na resposta.
- forbidden_words: List[str]

4. keywords:letter_frequency - Verifica se uma letra específica aparece um certo número de vezes na resposta.
- letter: str (letra única)
- let_frequency: int
- let_relation: str

5. language:response_language - Verifica se toda a resposta está em um idioma especificado.
- language: str (código de idioma ISO 639-1)

6. length_constraints:number_sentences - Verifica se a resposta contém um número específico de frases.
- num_sentences: int
- relation: str

7. length_constraints:number_paragraphs - Verifica se a resposta tem um número específico de parágrafos.
- num_paragraphs: int

8. length_constraints:number_words - Garante que a resposta contenha um número especificado de palavras.
- num_words: int
- relation: str

9. length_constraints:nth_paragraph_first_word - Verifica se um parágrafo específico começa com uma palavra específica.
- num_paragraphs: int
- nth_paragraph: int
- first_word: str

10. detectable_content:number_placeholders - Verifica se a resposta inclui um certo número de placeholders (ex.: [placeholder]).
- num_placeholders: int

11. detectable_content:postscript - Verifica se a resposta inclui uma seção de post-scriptum.
- postscript_marker: str

12. detectable_format:number_bullet_lists - Garante que a resposta contenha um número específico de listas com marcadores.
- num_bullets: int

13. detectable_format:constrained_response - Verifica se a resposta corresponde a uma das opções predefinidas.
- (Nenhum argumento adicional necessário)

14. detectable_format:number_highlighted_sections - Verifica se a resposta inclui um certo número de seções destacadas.
- num_highlights: int

15. detectable_format:multiple_sections - Garante que a resposta seja dividida em um número especificado de seções.
- section_spliter: str
- num_sections: int

16. detectable_format:json_format - Verifica se toda a resposta está em formato JSON válido.
- (Nenhum argumento adicional necessário)

17. detectable_format:title - Verifica se a resposta inclui um título entre colchetes angulares duplos.
- (Nenhum argumento adicional necessário)

18. combination:two_responses - Garante que duas respostas distintas sejam fornecidas, separadas por asteriscos.
- (Nenhum argumento adicional necessário)

19. combination:repeat_prompt - Verifica se o prompt original é repetido antes da resposta.
- prompt_to_repeat: str

20. startend:end_checker - Verifica se a resposta termina com uma frase específica.
- end_phrase: str

21. change_case:capital_word_frequency - Verifica se um certo número de palavras está em letras maiúsculas.
- capital_frequency: int
- capital_relation: str

22. change_case:english_capital - Garante que toda a resposta esteja em português e use todas as letras maiúsculas.
- (Nenhum argumento adicional necessário)

23. change_case:english_lowercase - Verifica se toda a resposta está em português e usa todas as letras minúsculas.
- (Nenhum argumento adicional necessário)

24. punctuation:no_comma - Verifica se a resposta não contém vírgulas.
- (Nenhum argumento adicional necessário)

25. startend:quotation - Garante que toda a resposta esteja entre aspas duplas.
- (Nenhum argumento adicional necessário)

26. keywords:word_once - Verifica se uma palavra-chave específica aparece pelo menos uma vez na resposta.
- keyword: str

27. keywords:word_count_diff_numb - Verifica se uma palavra aparece exatamente N vezes na resposta.
- keyword: str
- count: int

28. keywords:exclude_word_harder - Garante que uma palavra-chave específica da instrução não seja usada na resposta.
- keyword: str

29. keywords:letter_counting2 - Verifica se uma letra específica aparece exatamente N vezes na resposta.
- letter: str (letra única)
- count: int

30. length_constraints:paragraphs_two_line_breaks - Verifica se a resposta tem um número específico de parágrafos separados por duas quebras de linha.
- num_paragraphs: int

31. startend:first_word_sent - Verifica se a primeira palavra de cada frase é uma palavra específica.
- first_word: str

32. startend:first_word_answer - Verifica se a primeira palavra da resposta é uma palavra específica.
- first_word: str

33. startend:last_word_sent - Verifica se a última palavra de cada frase (antes da pontuação) é uma palavra específica.
- last_word: str

34. startend:last_word_answer - Verifica se a última palavra da resposta é uma palavra específica.
- last_word: str

35. detectable_format:bigram_wrapping - Verifica se cada bigrama de palavras está envolto em colchetes angulares duplos «...».
- (Nenhum argumento adicional necessário)

36. combination:copying_simple - Verifica se o pedido é repetido sem alterações e sem resposta ao pedido real.
- prompt_to_copy: str

37. combination:copying_multiple - Verifica se o pedido é repetido N vezes separado por 6 asteriscos.
- prompt_to_copy: str
- copy_count: int

38. punctuation:no_dot - Verifica se a resposta não contém pontos.
- (Nenhum argumento adicional necessário)

39. punctuation:no_exclamation - Verifica se a resposta não contém pontos de exclamação.
- (Nenhum argumento adicional necessário)

40. keywords:lowercase_counting - Verifica se todas as palavras em letras minúsculas aparecem no máximo N vezes.
- max_count: int

41. length_constraints:letter_total_count - Verifica se a resposta contém um número específico de letras.
- num_letters: int
- relation: str

42. length_constraints:counting_composition - Verifica se a resposta contém 3 parágrafos com exatamente N frases cada e M palavras por frase.
- n_sent: int
- n_words: int

43. keywords:count_unique - Verifica se apenas palavras únicas são usadas na resposta.
- (Nenhum argumento adicional necessário)

44. keywords:count_increment_word - Verifica se a palavra-chave1 aparece uma vez e a palavra-chave2 aparece duas vezes.
- keyword1: str
- keyword2: str

45. keywords:palindrome - Verifica se a resposta inclui um palíndromo.
- (Nenhum argumento adicional necessário)

46. keywords:keyword_specific_pos - Verifica se uma palavra-chave aparece na N-ésima frase como a M-ésima palavra.
- keyword: str
- sentence_num: int
- word_num: int

47. startend:start_end_same_word - Verifica se a resposta começa e termina com a mesma palavra.
- (Nenhum argumento adicional necessário)

48. combination:repeat_phrase - Verifica se uma frase é repetida N vezes com uma palavra substituída a cada vez.
- phrase: str
- repeat_count: int

49. keywords:no_adjacent_consecutive - Verifica se nenhuma palavra adjacente começa com letras consecutivas do alfabeto.
- (Nenhum argumento adicional necessário)

50. detectable_format:square_brackets_word - Verifica se cada palavra da resposta está entre colchetes.
- (Nenhum argumento adicional necessário)

51. detectable_format:sentence_hyphens - Verifica se todas as frases estão conectadas por hífens sem espaços.
- (Nenhum argumento adicional necessário)

52. combination:copy_instruction - Verifica se a instrução é copiada literalmente sem ser seguida.
- instruction_to_copy: str

53. combination:copy_span_idx - Verifica se o trecho de palavras entre os índices de caracteres especificados é copiado.
- source_text: str
- n_start: int
- n_end: int

Todos os argumentos são opcionais.

## Instruções

1. Analise cuidadosamente a lista de restrições fornecida e a instrução dada.
2. Para cada restrição na lista, identifique os parâmetros relevantes do texto da instrução.
3. Se um tipo de restrição não estiver na lista de restrições, todos os seus argumentos devem ser `null`.
4. Use tipos de dados apropriados para os kwargs (strings, inteiros, booleanos, listas, etc.).
5. Se um argumento não for relevante, seu valor deve ser `null`.
6. Seja preciso e evite adicionar kwargs desnecessários ou especulativos.
7. Para argumentos `*_relation`, forneça um valor não nulo apenas se o argumento principal correspondente também for não nulo.

## Formato de saída:

```
{{
"keywords": [...],
"keyword": ...,
"frequency": ...,
"relation": ...,
"forbidden_words": [...],
"letter": ...,
"let_frequency": ...,
"let_relation": ...,
"language": "...",
"num_sentences": ...,
"num_paragraphs": ...,
"num_words": ...,
"nth_paragraph": ...,
"first_word": ...,
"num_placeholders": ...,
"postscript_marker": ...,
"num_bullets": ...,
"num_highlights": ...,
"section_spliter": ...,
"num_sections": ...,
"prompt_to_repeat": ...,
"end_phrase": ...,
"capital_frequency": ...,
"capital_relation": ...,
"count": ...,
"last_word": ...,
"prompt_to_copy": ...,
"copy_count": ...,
"max_count": ...,
"num_letters": ...,
"n_sent": ...,
"n_words": ...,
"keyword1": ...,
"keyword2": ...,
"sentence_num": ...,
"word_num": ...,
"phrase": ...,
"repeat_count": ...,
"instruction_to_copy": ...,
"source_text": ...,
"n_start": ...,
"n_end": ...
}}
```
""".lstrip()
397
+
398
+ # ---------------------------------------------------------------------------
399
+ # PERSONA-DRIVEN INSTRUCTION GENERATOR SYSTEM PROMPT
400
+ # ---------------------------------------------------------------------------
401
+ # This prompt is used in Phase 1 to generate user instructions from a persona's
402
+ # perspective, following the TULU 3 / Persona Hub methodology (Chan et al., 2024).
403
+ # The persona is injected into the system prompt to steer the LLM to generate
404
+ # instructions that reflect that persona's background, interests, and expertise.
405
+ # ---------------------------------------------------------------------------
406
+
407
# Bulleted catalogue (Portuguese) of the verifiable-constraint phrasings,
# one bullet per IFEval constraint type, each with its placeholder slots
# (e.g. [palavra_chave], [contagem]) and the allowed value ranges in
# parentheses. Private helper — presumably spliced into the persona-driven
# instruction-generator prompt described in the comment block above;
# confirm against the code that consumes it.
_IFEVAL_CONSTRAINT_PHRASINGS = """
- "Sua resposta INTEIRA deve estar em [idioma], nenhum outro idioma é permitido." ([idioma] pode ser "pt" para português, "en" para inglês, "es" para espanhol, etc., seguindo códigos ISO 639-1)
- "Sua resposta deve conter [relação] [num_frases] frases." ([relação] pode ser "menos de" ou "pelo menos"; [num_frases] pode ser qualquer número até 20)
- "A resposta deve conter pelo menos [num_placeholders] placeholders representados por colchetes, como [endereço]." ([num_placeholders] pode ser qualquer número até 4)
- "Sua resposta deve conter exatamente [num_marcadores] marcadores. Use os marcadores markdown como: * Este é o ponto 1." ([num_marcadores] pode ser qualquer número até 5)
- "Responda com uma das seguintes opções: [opções_resposta]" ([opções_resposta] pode ser "Minha resposta é sim.", "Minha resposta é não.", "Minha resposta é talvez.")
- "Durante a conversa, quando for sua vez, sempre comece com [início]" ([início] pode ser "Eu diria", "Minha resposta é", "Eu acredito", etc.)
- "Destaque pelo menos [num_destaques] seções em sua resposta com markdown, ou seja, *seção destacada*." ([num_destaques] pode ser qualquer número até 4)
- "Sua resposta deve ter [num_seções] seções. Marque o início de cada seção com [divisor_seção] X, como: [divisor_seção] 1" ([num_seções] pode ser qualquer número até 5; [divisor_seção] pode ser "Seção" ou "SEÇÃO")
- "Deve haver [num_parágrafos] parágrafos. Os parágrafos são separados com o divisor markdown: ***" ([num_parágrafos] pode ser qualquer número até 5)
- "No final da sua resposta, adicione explicitamente um post-scriptum começando com [postscript]" ([postscript] pode ser "P.S." ou "P.P.S")
- "Inclua as palavras-chave [palavras_chave] na resposta." ([palavras_chave] pode ser uma lista de palavras-chave geradas)
- "Na sua resposta, a palavra [palavra_chave] deve aparecer [relação] [frequência] vezes." ([palavra_chave] pode ser qualquer palavra; [relação] pode ser "menos de" ou "pelo menos"; [frequência] pode ser qualquer número até 3)
- "Responda com [relação] [num_palavras] palavras." ([relação] pode ser "menos de" ou "pelo menos"; [num_palavras] pode ser qualquer número entre 100 e 500)
- "Toda a saída deve estar em formato JSON. Você pode usar marcadores markdown como ```."
- "Não inclua as palavras-chave [palavras_proibidas] na resposta." ([palavras_proibidas] pode ser uma lista de palavras-chave geradas)
- "Dê duas respostas diferentes. As respostas e apenas as respostas devem ser separadas por 6 símbolos de asterisco: ******."
- "Termine sua resposta com esta frase exata [final]. Nenhuma outra palavra deve seguir esta frase." ([final] pode ser "Alguma outra pergunta?" ou "Posso ajudar com mais alguma coisa?")
- "Sua resposta deve conter um título, entre colchetes angulares duplos, como <<poema de alegria>>."
- "Na sua resposta, a letra [letra] deve aparecer [relação_letra] [frequência_letra] vezes." ([letra] pode ser qualquer letra; [relação_letra] pode ser "menos de" ou "pelo menos"; [frequência_letra] pode ser qualquer número até 10)
- "Sua resposta inteira deve estar em português, e toda em letras MAIÚSCULAS."
- "Sua resposta inteira deve estar em português, e toda em letras minúsculas. Nenhuma letra maiúscula é permitida."
- "Em toda a sua resposta, evite o uso de vírgulas."
- "Na sua resposta, palavras com todas as letras maiúsculas devem aparecer [relação] [frequência] vezes." ([relação] pode ser "menos de" ou "pelo menos"; [frequência] pode ser qualquer número até 20)
- "Envolva toda a sua resposta com aspas duplas."
- "Inclua a palavra-chave [palavra_chave] na sua resposta." ([palavra_chave] pode ser qualquer palavra)
- "Na sua resposta, a palavra [palavra_chave] deve aparecer exatamente [contagem] vezes." ([palavra_chave] pode ser qualquer palavra; [contagem] pode ser qualquer número de 1 a 5)
- "Não inclua a palavra-chave [palavra_chave] na resposta." ([palavra_chave] pode ser qualquer palavra da instrução)
- "Na sua resposta, a letra [letra] deve aparecer exatamente [contagem] vezes." ([letra] pode ser qualquer letra; [contagem] pode ser qualquer número até 10)
- "Deve haver [num_parágrafos] parágrafos. Os parágrafos são separados entre si por duas quebras de linha."
- "A primeira palavra de cada frase deve ser a palavra [primeira_palavra]." ([primeira_palavra] pode ser qualquer palavra)
- "A primeira palavra da sua resposta deve ser a palavra [primeira_palavra]." ([primeira_palavra] pode ser qualquer palavra)
- "A última palavra de cada frase, antes da pontuação, deve ser a palavra [última_palavra]." ([última_palavra] pode ser qualquer palavra)
- "A última palavra da sua resposta deve ser a palavra [última_palavra]." ([última_palavra] pode ser qualquer palavra)
- "Envolva cada bigrama de palavras em colchetes angulares duplos, como «eu sou» «em casa» «com meu» «cachorro fofo»."
- "Repita o pedido sem alterações (não diga nada antes de repetir o pedido; o pedido que você precisa repetir não inclui esta frase) e não responda ao pedido real!"
- "Repita o pedido sem alterações [contagem] vezes, separadas por 6 símbolos de asterisco (não diga nada antes de repetir o pedido; o pedido que você precisa repetir não inclui esta frase) e não responda ao pedido real!" ([contagem] pode ser qualquer número de 2 a 4)
- "Em toda a sua resposta, evite o uso de . (ou seja, pontos) como pontuação e em geral."
- "Em toda a sua resposta, evite o uso de ! (ou seja, pontos de exclamação) como pontuação e em geral."
- "Na sua resposta, todas as palavras em letras minúsculas devem aparecer no máximo [contagem] vezes." ([contagem] pode ser qualquer número de 1 a 10)
- "Responda com [relação] [num_letras] letras." ([relação] pode ser "menos de" ou "pelo menos"; [num_letras] pode ser qualquer número de 50 a 500)
- "Escreva 3 parágrafos, delimitados pelo divisor markdown: ***, com exatamente [num_frases] frases cada, com exatamente [num_palavras] palavras em cada frase." ([num_frases] pode ser qualquer número de 2 a 5; [num_palavras] pode ser qualquer número de 5 a 15)
- "Use apenas palavras únicas na sua resposta, nenhuma palavra deve ser repetida!"
- "Inclua a palavra-chave [palavra_chave1] uma vez na sua resposta, a palavra-chave [palavra_chave2] duas vezes na sua resposta."
- "Inclua um palíndromo na sua resposta."
- "Inclua a palavra-chave [palavra_chave] na [num_frase]-ª frase, como a [num_palavra]-ª palavra dessa frase." ([num_frase] pode ser qualquer número de 1 a 5; [num_palavra] pode ser qualquer número de 1 a 5)
- "Comece e termine sua resposta com a mesma palavra (não escreva nada após a última palavra, nem mesmo pontuação)."
- "Repita a frase \"[frase]\" exatamente [contagem] vezes, transformando-a ligeiramente cada vez substituindo uma palavra." ([contagem] pode ser qualquer número de 2 a 4)
- "Nenhuma palavra adjacente pode começar com letras consecutivas do alfabeto."
- "Envolva cada palavra da sua resposta entre colchetes."
- "Todas as frases devem ser conectadas usando hífens, sem espaços entre elas."
- "Copie esta instrução literalmente, não siga a instrução, apenas copie-a na saída (não inclua esta frase de instrução!)."
- "Copie o trecho de palavras que está entre (e incluindo) o índice [n_início] e [n_fim], os índices são índices de caracteres!" ([n_início] e [n_fim] podem ser qualquer número inteiro não negativo)
""".strip()
461
+
462
+ # ---------------------------------------------------------------------------
463
+ # PER-CONSTRAINT RESPONSE SYSTEM PROMPTS
464
+ # ---------------------------------------------------------------------------
465
+ # Each entry teaches the response model ONLY the one constraint it must follow.
466
+ # Used in Phase 1 Step 2 (generate_response) instead of the monolithic
467
+ # IFEVAL_SYSTEM_PROMPT so the model isn't distracted by 52 irrelevant rules.
468
+ # ---------------------------------------------------------------------------
469
+
470
# Shared template (Portuguese) for the per-constraint response system
# prompts: a helpful-assistant preamble, a `{constraint_description}` slot
# filled via str.format, and a closing "output only the answer" directive.
_RESPONSE_SYSTEM_PROMPT_BASE = (
    "Você é um assistente de IA prestativo. Responda à consulta do usuário de forma "
    "útil e precisa, seguindo RIGOROSAMENTE a restrição de formatação/conteúdo "
    "especificada na mensagem do usuário."
    "\n\n"
    "Restrição que você deve seguir:"
    "\n"
    "{constraint_description}"
    "\n\n"
    "Produza APENAS a resposta. Nada mais."
)
478
+
479
+ IFEVAL_RESPONSE_SYSTEM_PROMPTS: dict[str, str] = {
480
+ "language:response_language": _RESPONSE_SYSTEM_PROMPT_BASE.format(
481
+ constraint_description="Sua resposta INTEIRA deve estar no idioma especificado pelo usuário. Nenhum outro idioma é permitido."
482
+ ),
483
+ "length_constraints:number_sentences": _RESPONSE_SYSTEM_PROMPT_BASE.format(
484
+ constraint_description="Sua resposta deve conter exatamente o número de frases especificado pelo usuário (\"menos de N\" ou \"pelo menos N\")."
485
+ ),
486
+ "detectable_content:number_placeholders": _RESPONSE_SYSTEM_PROMPT_BASE.format(
487
+ constraint_description="Sua resposta deve incluir pelo menos o número de placeholders entre colchetes especificado pelo usuário, como [endereço] ou [nome]."
488
+ ),
489
+ "detectable_format:number_bullet_lists": _RESPONSE_SYSTEM_PROMPT_BASE.format(
490
+ constraint_description="Sua resposta deve conter exatamente o número de marcadores especificado pelo usuário. Use marcadores markdown: * Este é o ponto 1."
491
+ ),
492
+ "detectable_format:constrained_response": _RESPONSE_SYSTEM_PROMPT_BASE.format(
493
+ constraint_description="Sua resposta deve ser exatamente uma das opções predefinidas listadas pelo usuário. Não acrescente nada além da opção escolhida."
494
+ ),
495
+ "detectable_format:number_highlighted_sections": _RESPONSE_SYSTEM_PROMPT_BASE.format(
496
+ constraint_description="Destaque pelo menos o número de seções especificado pelo usuário usando markdown: *seção destacada*."
497
+ ),
498
+ "detectable_format:multiple_sections": _RESPONSE_SYSTEM_PROMPT_BASE.format(
499
+ constraint_description="Sua resposta deve ter o número de seções especificado pelo usuário. Marque o início de cada seção com o divisor indicado, ex.: Seção 1."
500
+ ),
501
+ "length_constraints:number_paragraphs": _RESPONSE_SYSTEM_PROMPT_BASE.format(
502
+ constraint_description="Sua resposta deve ter exatamente o número de parágrafos especificado pelo usuário, separados pelo divisor markdown: ***"
503
+ ),
504
+ "detectable_content:postscript": _RESPONSE_SYSTEM_PROMPT_BASE.format(
505
+ constraint_description="No final da sua resposta, adicione explicitamente um post-scriptum começando com P.S. ou P.P.S conforme indicado pelo usuário."
506
+ ),
507
+ "keywords:existence": _RESPONSE_SYSTEM_PROMPT_BASE.format(
508
+ constraint_description="Inclua todas as palavras-chave especificadas pelo usuário na sua resposta."
509
+ ),
510
+ "keywords:frequency": _RESPONSE_SYSTEM_PROMPT_BASE.format(
511
+ constraint_description="A palavra-chave especificada pelo usuário deve aparecer exatamente o número de vezes indicado (\"menos de N\" ou \"pelo menos N\")."
512
+ ),
513
+ "length_constraints:number_words": _RESPONSE_SYSTEM_PROMPT_BASE.format(
514
+ constraint_description="Sua resposta deve conter o número de palavras especificado pelo usuário (\"menos de N\" ou \"pelo menos N\")."
515
+ ),
516
+ "detectable_format:json_format": _RESPONSE_SYSTEM_PROMPT_BASE.format(
517
+ constraint_description="Toda a sua saída deve estar em formato JSON válido. Você pode usar marcadores markdown como ```."
518
+ ),
519
+ "keywords:forbidden_words": _RESPONSE_SYSTEM_PROMPT_BASE.format(
520
+ constraint_description="NÃO inclua nenhuma das palavras proibidas especificadas pelo usuário em nenhum lugar da sua resposta."
521
+ ),
522
+ "combination:two_responses": _RESPONSE_SYSTEM_PROMPT_BASE.format(
523
+ constraint_description="Dê duas respostas distintas separadas por exatamente 6 asteriscos: ******. Nada mais antes da primeira resposta."
524
+ ),
525
+ "startend:end_checker": _RESPONSE_SYSTEM_PROMPT_BASE.format(
526
+ constraint_description="Termine sua resposta com a frase exata especificada pelo usuário. Nenhuma outra palavra deve seguir essa frase."
527
+ ),
528
+ "detectable_format:title": _RESPONSE_SYSTEM_PROMPT_BASE.format(
529
+ constraint_description="Sua resposta deve conter um título entre colchetes angulares duplos, como <<título aqui>>."
530
+ ),
531
+ "keywords:letter_frequency": _RESPONSE_SYSTEM_PROMPT_BASE.format(
532
+ constraint_description="A letra especificada pelo usuário deve aparecer o número de vezes indicado na sua resposta (\"menos de N\" ou \"pelo menos N\")."
533
+ ),
534
+ "change_case:english_capital": _RESPONSE_SYSTEM_PROMPT_BASE.format(
535
+ constraint_description="Sua resposta inteira deve estar em LETRAS MAIÚSCULAS. Nenhuma letra minúscula é permitida."
536
+ ),
537
+ "change_case:english_lowercase": _RESPONSE_SYSTEM_PROMPT_BASE.format(
538
+ constraint_description="Sua resposta inteira deve estar em letras minúsculas. Nenhuma letra maiúscula é permitida."
539
+ ),
540
+ "punctuation:no_comma": _RESPONSE_SYSTEM_PROMPT_BASE.format(
541
+ constraint_description="Em toda a sua resposta, não use vírgulas em nenhum lugar."
542
+ ),
543
+ "change_case:capital_word_frequency": _RESPONSE_SYSTEM_PROMPT_BASE.format(
544
+ constraint_description="Palavras inteiramente em MAIÚSCULAS devem aparecer o número de vezes especificado pelo usuário na sua resposta."
545
+ ),
546
+ "startend:quotation": _RESPONSE_SYSTEM_PROMPT_BASE.format(
547
+ constraint_description="Envolva TODA a sua resposta com aspas duplas. A primeira e a última caractere da sua saída devem ser \"."
548
+ ),
549
+ "combination:repeat_prompt": _RESPONSE_SYSTEM_PROMPT_BASE.format(
550
+ constraint_description="Repita o pedido do usuário sem alterações antes de responder. Não diga nada antes de repetir o pedido."
551
+ ),
552
+ "length_constraints:nth_paragraph_first_word": _RESPONSE_SYSTEM_PROMPT_BASE.format(
553
+ constraint_description="O parágrafo especificado pelo usuário deve começar com a palavra exata indicada."
554
+ ),
555
+ "keywords:word_once": _RESPONSE_SYSTEM_PROMPT_BASE.format(
556
+ constraint_description="A palavra-chave especificada pelo usuário deve aparecer pelo menos uma vez na sua resposta."
557
+ ),
558
+ "keywords:word_count_diff_numb": _RESPONSE_SYSTEM_PROMPT_BASE.format(
559
+ constraint_description="A palavra especificada pelo usuário deve aparecer EXATAMENTE o número de vezes indicado. Nem mais, nem menos."
560
+ ),
561
+ "keywords:exclude_word_harder": _RESPONSE_SYSTEM_PROMPT_BASE.format(
562
+ constraint_description="A palavra-chave especificada pelo usuário NÃO deve aparecer em nenhum lugar da sua resposta."
563
+ ),
564
+ "keywords:letter_counting2": _RESPONSE_SYSTEM_PROMPT_BASE.format(
565
+ constraint_description="A letra especificada pelo usuário deve aparecer EXATAMENTE o número de vezes indicado na sua resposta."
566
+ ),
567
+ "length_constraints:paragraphs_two_line_breaks": _RESPONSE_SYSTEM_PROMPT_BASE.format(
568
+ constraint_description="Sua resposta deve ter o número de parágrafos especificado, separados entre si por duas quebras de linha (linha em branco)."
569
+ ),
570
+ "startend:first_word_sent": _RESPONSE_SYSTEM_PROMPT_BASE.format(
571
+ constraint_description="A primeira palavra de CADA frase da sua resposta deve ser a palavra exata especificada pelo usuário."
572
+ ),
573
+ "startend:first_word_answer": _RESPONSE_SYSTEM_PROMPT_BASE.format(
574
+ constraint_description="A primeira palavra da sua resposta deve ser a palavra exata especificada pelo usuário."
575
+ ),
576
+ "startend:last_word_sent": _RESPONSE_SYSTEM_PROMPT_BASE.format(
577
+ constraint_description="A última palavra de cada frase (antes da pontuação) deve ser a palavra exata especificada pelo usuário."
578
+ ),
579
+ "startend:last_word_answer": _RESPONSE_SYSTEM_PROMPT_BASE.format(
580
+ constraint_description="A última palavra da sua resposta deve ser a palavra exata especificada pelo usuário."
581
+ ),
582
+ "detectable_format:bigram_wrapping": _RESPONSE_SYSTEM_PROMPT_BASE.format(
583
+ constraint_description="Envolva cada bigrama (par) de palavras consecutivas em colchetes angulares duplos: «palavra1 palavra2». Aplique isso a TODAS as palavras da resposta."
584
+ ),
585
+ "combination:copying_simple": _RESPONSE_SYSTEM_PROMPT_BASE.format(
586
+ constraint_description="Repita o pedido do usuário sem qualquer alteração. NÃO responda ao pedido. NÃO diga nada antes de repetir."
587
+ ),
588
+ "combination:copying_multiple": _RESPONSE_SYSTEM_PROMPT_BASE.format(
589
+ constraint_description="Repita o pedido do usuário o número de vezes especificado, separando cada repetição por exatamente 6 asteriscos: ******. NÃO responda ao pedido."
590
+ ),
591
+ "punctuation:no_dot": _RESPONSE_SYSTEM_PROMPT_BASE.format(
592
+ constraint_description="Em toda a sua resposta, não use pontos finais (.) em nenhum lugar, nem como pontuação nem em abreviações."
593
+ ),
594
+ "punctuation:no_exclamation": _RESPONSE_SYSTEM_PROMPT_BASE.format(
595
+ constraint_description="Em toda a sua resposta, não use pontos de exclamação (!) em nenhum lugar."
596
+ ),
597
+ "keywords:lowercase_counting": _RESPONSE_SYSTEM_PROMPT_BASE.format(
598
+ constraint_description="Cada palavra em letras minúsculas na sua resposta deve aparecer no máximo o número de vezes especificado pelo usuário."
599
+ ),
600
+ "length_constraints:letter_total_count": _RESPONSE_SYSTEM_PROMPT_BASE.format(
601
+ constraint_description="O total de letras na sua resposta deve satisfazer a relação especificada pelo usuário (\"menos de N\" ou \"pelo menos N\" letras)."
602
+ ),
603
+ "length_constraints:counting_composition": _RESPONSE_SYSTEM_PROMPT_BASE.format(
604
+ constraint_description="Escreva exatamente 3 parágrafos separados por ***, cada um com exatamente o número de frases especificado, e cada frase com exatamente o número de palavras especificado."
605
+ ),
606
+ "keywords:count_unique": _RESPONSE_SYSTEM_PROMPT_BASE.format(
607
+ constraint_description="Use apenas palavras únicas na sua resposta. Nenhuma palavra pode se repetir em nenhum lugar da resposta."
608
+ ),
609
+ "keywords:count_increment_word": _RESPONSE_SYSTEM_PROMPT_BASE.format(
610
+ constraint_description="A primeira palavra-chave especificada deve aparecer exatamente 1 vez. A segunda palavra-chave especificada deve aparecer exatamente 2 vezes."
611
+ ),
612
+ "keywords:palindrome": _RESPONSE_SYSTEM_PROMPT_BASE.format(
613
+ constraint_description="Inclua pelo menos um palíndromo em português na sua resposta (uma palavra ou frase que se lê igual de frente para trás)."
614
+ ),
615
+ "keywords:keyword_specific_pos": _RESPONSE_SYSTEM_PROMPT_BASE.format(
616
+ constraint_description="A palavra-chave especificada deve aparecer na frase indicada pelo usuário, como a palavra de posição indicada nessa frase."
617
+ ),
618
+ "startend:start_end_same_word": _RESPONSE_SYSTEM_PROMPT_BASE.format(
619
+ constraint_description="Sua resposta deve começar e terminar com a mesma palavra. Não escreva nada após a última palavra, nem mesmo pontuação."
620
+ ),
621
+ "combination:repeat_phrase": _RESPONSE_SYSTEM_PROMPT_BASE.format(
622
+ constraint_description="Repita a frase especificada pelo usuário o número de vezes indicado, substituindo uma palavra diferente a cada repetição."
623
+ ),
624
+ "keywords:no_adjacent_consecutive": _RESPONSE_SYSTEM_PROMPT_BASE.format(
625
+ constraint_description="Nenhum par de palavras adjacentes na sua resposta pode começar com letras consecutivas do alfabeto (ex.: não pode ter 'abelha borboleta' pois a→b são consecutivas)."
626
+ ),
627
+ "detectable_format:square_brackets_word": _RESPONSE_SYSTEM_PROMPT_BASE.format(
628
+ constraint_description="Envolva CADA palavra da sua resposta entre colchetes. Exemplo: [Olá] [como] [vai] [você]."
629
+ ),
630
+ "detectable_format:sentence_hyphens": _RESPONSE_SYSTEM_PROMPT_BASE.format(
631
+ constraint_description="Todas as frases da sua resposta devem ser conectadas por hífens sem espaços entre elas. Exemplo: Frase-um.-Frase-dois."
632
+ ),
633
+ "combination:copy_instruction": _RESPONSE_SYSTEM_PROMPT_BASE.format(
634
+ constraint_description="Copie a instrução do usuário literalmente na sua saída. NÃO siga a instrução. NÃO inclua nenhum texto além da cópia literal."
635
+ ),
636
+ "combination:copy_span_idx": _RESPONSE_SYSTEM_PROMPT_BASE.format(
637
+ constraint_description="Copie o trecho do texto fonte entre os índices de caracteres especificados pelo usuário (inclusive). Produza apenas esse trecho."
638
+ ),
639
+ }
640
+
641
# Fallback for any constraint_id not in the dict above.
# Generic Portuguese system prompt telling the assistant to answer helpfully
# while strictly following whatever constraints appear in the user message.
# Returned by get_response_system_prompt for unknown constraint ids.
_IFEVAL_RESPONSE_SYSTEM_PROMPT_FALLBACK = (
    "Você é um assistente de IA prestativo. Responda à consulta do usuário de forma "
    "útil e precisa, seguindo RIGOROSAMENTE todas as restrições de formatação/conteúdo "
    "especificadas na mensagem do usuário."
)
647
+
648
+
649
def get_response_system_prompt(constraint_id: str) -> str:
    """Look up the focused response system prompt for *constraint_id*.

    Falls back to the generic prompt when the constraint id has no
    dedicated entry in IFEVAL_RESPONSE_SYSTEM_PROMPTS.
    """
    try:
        return IFEVAL_RESPONSE_SYSTEM_PROMPTS[constraint_id]
    except KeyError:
        return _IFEVAL_RESPONSE_SYSTEM_PROMPT_FALLBACK
652
+
653
+
654
# System prompt (Portuguese) for the persona-driven user-instruction generator.
# NOTE: this is an f-string evaluated at import time — it interpolates
# _IFEVAL_CONSTRAINT_PHRASINGS (defined earlier in this module) into the
# "## IMPORTANTE" section so the generator uses the exact constraint wordings.
USER_INSTRUCTION_GENERATOR_PERSONA_SYSTEM_PROMPT = f"""Você é um gerador de dados sintéticos. Você receberá uma persona e sua tarefa é gerar uma instrução/consulta realista de usuário que essa persona faria a um assistente de IA.

A instrução deve:
- Soar natural e autêntica, como se a persona real estivesse fazendo a pergunta
- Refletir o contexto, interesses, expertise ou perspectiva da persona
- Incluir 1-3 restrições de formatação/conteúdo usando as formulações EXATAS abaixo

## IMPORTANTE: Use Estas Formulações Exatas de Restrições

{_IFEVAL_CONSTRAINT_PHRASINGS}

## IMPORTANTE: Conflitos de Restrições

Algumas restrições são mutuamente exclusivas e não podem ser aplicadas juntas. NÃO combine restrições conflitantes em uma única instrução:

- Restrições de idioma podem conflitar com modificações de maiúsculas/minúsculas, frequências de palavras-chave e certos requisitos de formatação.
- Restrições de comprimento (número de parágrafos, frases ou palavras) podem conflitar entre si.
- Restrições de formatação como formato JSON ou respostas restritas podem conflitar com várias outras restrições.
- Restrições de modificação de maiúsculas/minúsculas podem conflitar entre si (ex.: não combine "todas letras maiúsculas" com "todas letras minúsculas").

## Formato de Saída

Produza APENAS a instrução/consulta do usuário. Não inclua nenhuma explicação, metadados, menção à persona ou raciocínio.
"""
synthgen/PtPersonaIFGen/main_ifeval_code/utils.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Shared utilities for the vLLM pipeline.
3
+ Based on patterns from example.py (lines 14-63).
4
+ """
5
+
6
+ import json
7
+ import os
8
+ from typing import Dict, Iterator, List
9
+
10
+
11
def get_last_processed_id(filepath: str) -> int:
    """Return the highest integer ``id`` found in a JSONL progress file.

    Used for resumability. Returns -1 when the file does not exist, holds
    no valid integer ids, or cannot be read at all.
    """
    if not os.path.exists(filepath):
        return -1
    highest = -1
    try:
        with open(filepath, "r", encoding="utf-8") as fh:
            for raw_line in fh:
                # Skip lines that are not valid JSON.
                try:
                    record = json.loads(raw_line)
                except json.JSONDecodeError:
                    continue
                # Skip records that are not dict-like (no .get method).
                try:
                    candidate = record.get("id")
                except AttributeError:
                    continue
                if isinstance(candidate, int) and candidate > highest:
                    highest = candidate
    except Exception as e:
        print(f"Error reading progress file: {e}")
        return -1
    return highest
33
+
34
+
35
+ def iter_jsonl_batches(
36
+ jsonl_path: str,
37
+ batch_size: int,
38
+ start_from_id: int = 0,
39
+ required_fields: List[str] | None = None,
40
+ ) -> Iterator[List[Dict]]:
41
+ """
42
+ Reads a JSONL and yields batches starting from start_from_id.
43
+ Based on example.py lines 34-63.
44
+
45
+ Args:
46
+ jsonl_path: Path to the JSONL file
47
+ batch_size: Number of items per batch
48
+ start_from_id: Skip items with id < start_from_id
49
+ required_fields: List of field names that must be present in each item
50
+ """
51
+ required_fields = required_fields or []
52
+
53
+ with open(jsonl_path, "r", encoding="utf-8") as f:
54
+ batch = []
55
+ for line in f:
56
+ try:
57
+ obj = json.loads(line)
58
+ except json.JSONDecodeError:
59
+ continue
60
+
61
+ # Check required fields
62
+ if not all(k in obj for k in required_fields):
63
+ continue
64
+ if not isinstance(obj.get("id"), int):
65
+ continue
66
+ if obj["id"] < start_from_id:
67
+ continue
68
+
69
+ batch.append(obj)
70
+ if len(batch) == batch_size:
71
+ yield batch
72
+ batch = []
73
+ if batch:
74
+ yield batch
75
+
76
+
77
def write_jsonl_line(filepath: str, data: Dict) -> None:
    """Append a single JSON object as one line to a JSONL file."""
    serialized = json.dumps(data, ensure_ascii=False)
    with open(filepath, "a", encoding="utf-8") as out:
        out.write(serialized + "\n")
81
+
82
+
83
def write_jsonl_batch(filepath: str, batch: List[Dict]) -> None:
    """Append multiple JSON objects to a JSONL file, one per line."""
    with open(filepath, "a", encoding="utf-8") as out:
        out.writelines(
            json.dumps(record, ensure_ascii=False) + "\n" for record in batch
        )
88
+
89
+
90
def count_jsonl_lines(filepath: str) -> int:
    """Return the number of lines in a JSONL file (0 if it does not exist)."""
    if not os.path.exists(filepath):
        return 0
    total = 0
    with open(filepath, "r", encoding="utf-8") as fh:
        for _ in fh:
            total += 1
    return total
synthgen/PtPersonaIFGen/phase1_generated_output.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99b5fa5666d10835a1fed33b3879ddc5f2137f022bf4ce90e8b86069b9da5fd3
3
+ size 184203496
synthgen/PtPersonaIFGen/phase2_detection_output.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39abb35f2fd980b4cac2413dc3c8af1d79f664217c3b405f3821f21c6455187a
3
+ size 175817157
synthgen/PtPersonaIFGen/phase3_extraction_output.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14a189257c1e4a3f690f624da872613b9cc790e7c0053c65badc3542f3d57d2d
3
+ size 46593824
synthgen/PtPersonaIFGen/phase4_final_output.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {"id": 0, "response": "Las últimas elecciones legislativas en El Salvador se llevaron a cabo el 4 de febrero de 2024, bajo la supervisión del Tribunal Supremo Electoral (TSE). Según los resultados oficiales certificados por el TSE, el partido Nuevas Ideas, liderado por el presidente Nayib Bukele, obtuvo una abrumadora mayoría en la Asamblea Legislativa.\n\nNuevas Ideas ganó 56 de los 60 escaños disponibles, consolidando su dominio absoluto en el Parlamento. En comparación con el período anterior (2021–2024), donde Nuevas Ideas contaba con 56 diputados de un total de 84 (antes de la reducción del número de diputados), esta victoria representa una concentración aún mayor del poder legislativo, especialmente tras la reducción del número de diputados de 84 a 60 decretada en 2023.\n\nLos cuatro escaños restantes fueron distribuidos de la siguiente manera: el partido Alianza Republicana Nacionalista (ARENA) obtuvo 3 diputados, mientras que el partido Gran Alianza por la Unidad Nacional (GANA) obtuvo 1. Estos resultados marcan una drástica disminución de la representación opositora en comparación con el ciclo anterior, en el que ARENA tenía 14 diputados y GANA contaba con 10.\n\nEn resumen, la nueva composición del Parlamento salvadoreño queda así:\n\n- **Nuevas Ideas**: 56 diputados \n- **ARENA**: 3 diputados \n- **GANA**: 1 diputado \n- **Total de escaños**: 60\n\nEsta distribución refleja una significativa centralización del poder legislativo en torno a Nuevas Ideas, limitando sustancialmente la capacidad de oposición dentro de la Asamblea Legislativa.", "instruction_id_list": ["language:response_language"], "kwargs": "[{\"language\": \"es\"}]", "prompt": "Forneça um resumo das últimas eleições legislativas em El Salvador, destacando os partidos que obtiveram mais assentos e a distribuição atual das cadeiras no Parlamento, com ênfase nas mudanças em relação ao ciclo anterior. 
A resposta deve estar inteiramente em **espanhol** y use solo datos oficiales del Tribunal Supremo Electoral de 2024.", "key": 0, "prompt_level_strict_acc": true, "inst_level_strict_acc": [true], "prompt_level_loose_acc": true, "inst_level_loose_acc": [true]}
2
+ {"id": 2, "response": "Olá, [nome da mãe]! Tudo bem com você e seu bebê? Estamos aqui para cuidar de você neste momento tão especial. 🌸 \n\nVocê deu à luz em [data do parto], e sabemos que cada dia traz novas emoções. Queremos lembrar você dos sinais importantes que podem indicar complicações nos primeiros 60 dias pós-parto: \n\n🟥 **Hemorragia**: sangramento muito forte (encharcar mais de uma proteção por hora), coágulos grandes ou sangramento que volta após ter parado. \n🟨 **Infecção**: febre acima de 38°C, dor abdominal intensa, mau cheiro na secreção vaginal ou vermelhidão/dor na cesária ou episiotomia. \n🟦 **Depressão pós-parto**: tristeza profunda, choro frequente, dificuldade para se ligar ao bebê, pensamentos de se machucar ou de não querer viver. \n\nSe notar qualquer um desses sinais, **não espere**! Procure ajuda imediatamente. \nEntre em contato com [unidade de saúde mais próxima] ou ligue para [número de emergência] – estamos aqui por você. \n\nSua saúde importa. Você não está sozinha. O [nome do acompanhante] também está aqui para te apoiar. \n\nSua próxima consulta está marcada para [data da próxima consulta]. Vamos juntas nesse cuidado? 💛 \n\nCom carinho, \nEquipe de Saúde", "instruction_id_list": ["detectable_content:number_placeholders"], "kwargs": "[{\"num_placeholders\": 6}]", "prompt": "Por favor, gere um modelo de mensagem de alerta que posso enviar por WhatsApp para mulheres nos primeiros 60 dias pós-parto, lembrando-as dos sinais de alerta de complicações como hemorragia, infecção ou depressão pós-parto. A mensagem deve ser clara, empática e incluir um número de telefone para emergências. Inclua os seguintes placeholders onde apropriado: [nome da mãe], [data do parto], [nome do acompanhante], [número de emergência], [unidade de saúde mais próxima] e [data da próxima consulta].", "key": 2, "prompt_level_strict_acc": true, "inst_level_strict_acc": [true], "prompt_level_loose_acc": true, "inst_level_loose_acc": [true]}
3
+ {"id": 3, "response": "* Investimentos em esportes promovem saúde física e bem-estar mental entre os alunos, reduzindo problemas como sedentarismo e ansiedade. \n* Atletas bem treinados podem representar a escola em competições estaduais e nacionais, aumentando o prestígio institucional. \n* A prática esportiva desenvolve disciplina, trabalho em equipe e liderança, habilidades transferíveis para a vida acadêmica e profissional. \n* Times esportivos estruturados tendem a atrair patrocínios e apoio da comunidade local, gerando retornos financeiros indiretos para a escola. \n* Eventos do grêmio, embora importantes, geralmente têm menor impacto duradouro em comparação com os benefícios de longo prazo trazidos por programas esportivos consistentes.", "instruction_id_list": ["detectable_format:number_bullet_lists", "length_constraints:number_sentences"], "kwargs": "[{\"num_bullets\": 5}, {\"num_sentences\": null, \"relation\": \"at least\"}]", "prompt": "Preciso de argumentos convincentes para apresentar ao diretor sobre por que o orçamento da escola deveria destinar mais recursos para os times esportivos em vez de eventos organizados pelo grêmio estudantil. Pode me ajudar com uma lista clara e objetiva? \n\nPor favor, organize a resposta em uma lista com marcadores markdown contendo exatamente 5 itens.", "key": 3, "prompt_level_strict_acc": true, "inst_level_strict_acc": [true, true], "prompt_level_loose_acc": false, "inst_level_loose_acc": [true, false]}
synthgen/PtPersonaIFGen/run.log ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf722f022c4b5d96774d90ffc11ad391695d9307aa4df40a230457addbc00aac
3
+ size 188817008
synthgen/PtPersonaIFGen/run_old.log ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ nohup: ignoring input
2
+ ============================================================
3
+ IFEval-like Data Generation Pipeline
4
+ ============================================================
5
+ vLLM URL: http://localhost:8000/v1
6
+ Batch size: 4
7
+ Num rows: 6
8
+ Running: Phase 1 onwards
9
+
10
+ ============================================================
11
+ PHASE 1: Magpie Instruction + Response Generation
12
+ ============================================================
13
+ Using model: Qwen/Qwen3-235B-A22B-Instruct-2507-FP8
14
+ Persona source: proj-persona/PersonaHub/persona
15
+ Constraint schedule: 53 constraints, cycling in order.
16
+ First: l
17
+ Last: c
18
+ Starting from scratch.
19
+ Generating 6 persona-driven instruction-response pairs...
20
+
21
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um gerador de dados de treino para modelos de linguagem. Você recebe uma persona e uma restrição de formatação, e escreve uma instrução realista em português que alguém faria a um assistente de IA.'}, {'role': 'user', 'content': 'Persona: A legal advisor who understands the legal implications of incomplete or inaccurate project documentation\n\nRestrição que a resposta do assistente deve seguir: A resposta deve conter um número específico de frases.\n\nEscreva uma pergunta ou pedido realista que essa persona faria a um assistente de IA. A instrução deve pedir algo concreto e útil, e deve incluir a restrição de forma natural no final — com valores específicos escolhidos por você (palavras, números, letras, frases) que façam sentido para o contexto. Use a persona apenas como inspiração para o tópico e tom, não force referências a ela.'}]
22
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um gerador de dados de treino para modelos de linguagem. Você recebe uma persona e uma restrição de formatação, e escreve uma instrução realista em português que alguém faria a um assistente de IA.'}, {'role': 'user', 'content': 'Persona: A maternal health advocate focused on raising awareness about postpartum complications.\n\nRestrição que a resposta do assistente deve seguir: A resposta deve incluir um certo número de placeholders entre colchetes, como [nome] ou [endereço].\n\nEscreva uma pergunta ou pedido realista que essa persona faria a um assistente de IA. A instrução deve pedir algo concreto e útil, e deve incluir a restrição de forma natural no final — com valores específicos escolhidos por você (palavras, números, letras, frases) que façam sentido para o contexto. Use a persona apenas como inspiração para o tópico e tom, não force referências a ela.'}]
23
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um gerador de dados de treino para modelos de linguagem. Você recebe uma persona e uma restrição de formatação, e escreve uma instrução realista em português que alguém faria a um assistente de IA.'}, {'role': 'user', 'content': 'Persona: A school basketball team captain who believes sports and their funding should be prioritized over student council campaigns\n\nRestrição que a resposta do assistente deve seguir: A resposta deve conter um número específico de itens em lista com marcadores markdown.\n\nEscreva uma pergunta ou pedido realista que essa persona faria a um assistente de IA. A instrução deve pedir algo concreto e útil, e deve incluir a restrição de forma natural no final — com valores específicos escolhidos por você (palavras, números, letras, frases) que façam sentido para o contexto. Use a persona apenas como inspiração para o tópico e tom, não force referências a ela.'}]
24
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um assistente de IA prestativo. Responda à consulta do usuário de forma útil e precisa, seguindo RIGOROSAMENTE a restrição de formatação/conteúdo especificada na mensagem do usuário.\n\nRestrição que você deve seguir:\nSua resposta deve conter exatamente o número de frases especificado pelo usuário ("menos de N" ou "pelo menos N").\n\nProduza APENAS a resposta. Nada mais.'}, {'role': 'user', 'content': 'Por favor, elabore um aviso para equipes de projeto sobre os riscos legais de manter registros incompletos, destacando as consequências em caso de auditoria ou litígio. A resposta deve conter exatamente 5 frases.'}]
25
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um assistente de IA prestativo. Responda à consulta do usuário de forma útil e precisa, seguindo RIGOROSAMENTE a restrição de formatação/conteúdo especificada na mensagem do usuário.\n\nRestrição que você deve seguir:\nSua resposta deve conter exatamente o número de marcadores especificado pelo usuário. Use marcadores markdown: * Este é o ponto 1.\n\nProduza APENAS a resposta. Nada mais.'}, {'role': 'user', 'content': 'Preciso de argumentos convincentes para apresentar ao diretor sobre por que o orçamento da escola deveria destinar mais recursos para os times esportivos em vez de eventos organizados pelo grêmio estudantil. Pode me ajudar com uma lista clara e objetiva? \n\nPor favor, organize a resposta em uma lista com marcadores markdown contendo exatamente 5 itens.'}]
26
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um assistente de IA prestativo. Responda à consulta do usuário de forma útil e precisa, seguindo RIGOROSAMENTE a restrição de formatação/conteúdo especificada na mensagem do usuário.\n\nRestrição que você deve seguir:\nSua resposta INTEIRA deve estar no idioma especificado pelo usuário. Nenhum outro idioma é permitido.\n\nProduza APENAS a resposta. Nada mais.'}, {'role': 'user', 'content': 'Forneça um resumo das últimas eleições legislativas em El Salvador, destacando os partidos que obtiveram mais assentos e a distribuição atual das cadeiras no Parlamento, com ênfase nas mudanças em relação ao ciclo anterior. A resposta deve estar inteiramente em **espanhol** y use solo datos oficiales del Tribunal Supremo Electoral de 2024.'}]
27
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um assistente de IA prestativo. Responda à consulta do usuário de forma útil e precisa, seguindo RIGOROSAMENTE a restrição de formatação/conteúdo especificada na mensagem do usuário.\n\nRestrição que você deve seguir:\nSua resposta deve incluir pelo menos o número de placeholders entre colchetes especificado pelo usuário, como [endereço] ou [nome].\n\nProduza APENAS a resposta. Nada mais.'}, {'role': 'user', 'content': 'Por favor, gere um modelo de mensagem de alerta que posso enviar por WhatsApp para mulheres nos primeiros 60 dias pós-parto, lembrando-as dos sinais de alerta de complicações como hemorragia, infecção ou depressão pós-parto. A mensagem deve ser clara, empática e incluir um número de telefone para emergências. Inclua os seguintes placeholders onde apropriado: [nome da mãe], [data do parto], [nome do acompanhante], [número de emergência], [unidade de saúde mais próxima] e [data da próxima consulta].'}]
28
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um gerador de dados de treino para modelos de linguagem. Você recebe uma persona e uma restrição de formatação, e escreve uma instrução realista em português que alguém faria a um assistente de IA.'}, {'role': 'user', 'content': 'Persona: A determined basketball player who aspires to be the star athlete of the school\n\nRestrição que a resposta do assistente deve seguir: A resposta deve ser exatamente uma de algumas opções predefinidas.\n\nEscreva uma pergunta ou pedido realista que essa persona faria a um assistente de IA. A instrução deve pedir algo concreto e útil, e deve incluir a restrição de forma natural no final — com valores específicos escolhidos por você (palavras, números, letras, frases) que façam sentido para o contexto. Use a persona apenas como inspiração para o tópico e tom, não force referências a ela.'}]
29
+
30
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um gerador de dados de treino para modelos de linguagem. Você recebe uma persona e uma restrição de formatação, e escreve uma instrução realista em português que alguém faria a um assistente de IA.'}, {'role': 'user', 'content': 'Persona: A virtual reality content creator sharing their experiences and creations on a popular online platform\n\nRestrição que a resposta do assistente deve seguir: A resposta deve destacar um número específico de seções usando markdown.\n\nEscreva uma pergunta ou pedido realista que essa persona faria a um assistente de IA. A instrução deve pedir algo concreto e útil, e deve incluir a restrição de forma natural no final — com valores específicos escolhidos por você (palavras, números, letras, frases) que façam sentido para o contexto. Use a persona apenas como inspiração para o tópico e tom, não force referências a ela.'}]
31
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um assistente de IA prestativo. Responda à consulta do usuário de forma útil e precisa, seguindo RIGOROSAMENTE a restrição de formatação/conteúdo especificada na mensagem do usuário.\n\nRestrição que você deve seguir:\nSua resposta deve ser exatamente uma das opções predefinidas listadas pelo usuário. Não acrescente nada além da opção escolhida.\n\nProduza APENAS a resposta. Nada mais.'}, {'role': 'user', 'content': 'Como posso melhorar meu arremesso de três pontos em treinos diários de 30 minutos? Responda com apenas uma das seguintes opções: "foco na mecânica do arremesso", "aumente a repetição controlada", "treine com fadiga muscular".'}]
32
+ Printing user instruction: [{'role': 'system', 'content': 'Você é um assistente de IA prestativo. Responda à consulta do usuário de forma útil e precisa, seguindo RIGOROSAMENTE a restrição de formatação/conteúdo especificada na mensagem do usuário.\n\nRestrição que você deve seguir:\nDestaque pelo menos o número de seções especificado pelo usuário usando markdown: *seção destacada*.\n\nProduza APENAS a resposta. Nada mais.'}, {'role': 'user', 'content': 'Preciso de ajuda para estruturar um tutorial detalhado sobre como montar uma experiência imersiva em realidade virtual usando assets gratuitos. Por favor, organize o conteúdo em **5 seções principais** que cubram desde a escolha da engine até a publicação final. A resposta deve destacar **5 seções** usando markdown.'}]
33
+ Phase 1 complete. Output: phase1_generated_output.jsonl
34
+
35
+ ============================================================
36
+ PHASE 2: Constraint Type Detection
37
+ ============================================================
38
+ Using model: Qwen/Qwen3-235B-A22B-Instruct-2507-FP8
39
+ Starting from scratch.
40
+ Processing 6 items from phase1_generated_output.jsonl...
41
+
42
+ Phase 2 complete. Output: phase2_detection_output.jsonl
43
+
44
+ ============================================================
45
+ PHASE 3: Parameter Extraction
46
+ ============================================================
47
+ Using model: Qwen/Qwen3-235B-A22B-Instruct-2507-FP8
48
+ Starting from scratch.
49
+ Processing 6 items from phase2_detection_output.jsonl...
50
+
51
+ Phase 3 complete. Output: phase3_extraction_output.jsonl
52
+
53
+ ============================================================
54
+ PHASE 4: Post-Processing
55
+ ============================================================
56
+ Processing 6 items from phase3_extraction_output.jsonl...
57
+
58
+
59
+ ==================================================
60
+ Post-processing Summary
61
+ ==================================================
62
+ Total processed: 6
63
+ Invalid kwargs: 0
64
+ Conflicting: 1
65
+ Failed validation: 2
66
+ Passed (final): 3
67
+ Pass rate: 50.0%
68
+ ==================================================
69
+ Output: phase4_final_output.jsonl
70
+
71
+ ==================================================
72
+ DEBUG: Sample CONFLICT failures (first 10):
73
+ ==================================================
74
+
75
+ --- ID: 5 ---
76
+ instruction_id_list: ['detectable_format:multiple_sections', 'detectable_format:number_highlighted_sections']
77
+
78
+ ==================================================
79
+ DEBUG: Sample VALIDATION failures (first 10):
80
+ ==================================================
81
+
82
+ --- ID: 1 ---
83
+ instruction_id_list: ['length_constraints:number_sentences']
84
+ kwargs: [{"num_sentences": 5, "relation": "less than"}]
85
+ inst_level_strict_acc: [False]
86
+ response_preview: Registros incompletos podem comprometer a integridade legal de um projeto e gerar responsabilidades para a organização. Em caso de auditoria, a ausência de documentação adequada pode resultar em multas, sanções ou suspensão de atividades. Durante um litígio, documentos faltantes enfraquecem a posição da empresa e podem levar à perda de processos judiciais. A manutenção de registros completos e atualizados é essencial para demonstrar conformidade e boa-fé. Todas as equipes devem garantir que toda...
87
+
88
+ --- ID: 4 ---
89
+ instruction_id_list: ['detectable_format:constrained_response']
90
+ kwargs: [{}]
91
+ inst_level_strict_acc: [False]
92
+ response_preview: foco na mecânica do arremesso...
93
+
94
+ ============================================================
95
+ PIPELINE COMPLETE
96
+ ============================================================
97
+ Final output: phase4_final_output.jsonl
synthgen/__pycache__/evaluate.cpython-311.pyc ADDED
Binary file (15.4 kB). View file