Update README.md
Browse files
README.md
CHANGED
|
@@ -121,7 +121,7 @@ python -m scripts.save \
 accelerate launch --num_processes 8 -m scripts.eval \
     --phase generate \
     --model_path "logs/pipelines/test/checkpoint-2000" \
-    --eval_metrics imagereward clipscore pickscore hpsv2 hpsv3 aesthetic ocr dpgbench
+    --eval_metrics imagereward clipscore pickscore hpsv2 hpsv3 aesthetic ocr dpgbench \
     --output_dir "logs/evaluations/test" \
     --base_model sd3 \
     --save_images
@@ -142,7 +142,7 @@ pip install fairseq --no-deps
 
 accelerate launch --num_processes 8 -m scripts.eval \
     --phase evaluate \
-    --eval_metrics imagereward clipscore pickscore hpsv2 hpsv3 aesthetic ocr dpgbench
+    --eval_metrics imagereward clipscore pickscore hpsv2 hpsv3 aesthetic ocr dpgbench \
     --output_dir "logs/evaluations/test"
 ```
 
|
|
|
 accelerate launch --num_processes 8 -m scripts.eval \
     --phase generate \
     --model_path "logs/pipelines/test/checkpoint-2000" \
+    --eval_metrics imagereward clipscore pickscore hpsv2 hpsv3 aesthetic ocr dpgbench \
     --output_dir "logs/evaluations/test" \
     --base_model sd3 \
     --save_images
|
|
 
 accelerate launch --num_processes 8 -m scripts.eval \
     --phase evaluate \
+    --eval_metrics imagereward clipscore pickscore hpsv2 hpsv3 aesthetic ocr dpgbench \
     --output_dir "logs/evaluations/test"
 ```
 