DocUA committed on
Commit
085df7b
·
1 Parent(s): 6a09f03

chore: Update dependencies and adjust interface logic.

Browse files
Files changed (2) hide show
  1. interface.py +119 -9
  2. requirements.txt +1 -0
interface.py CHANGED
@@ -389,6 +389,13 @@ async def process_batch_testing(
389
  provider: str,
390
  model_name: str,
391
  delay_seconds: float = 1.0,
 
 
 
 
 
 
 
392
  progress=gr.Progress()
393
  ) -> Tuple[str, Optional[str]]:
394
  """Process batch testing of legal position generation."""
@@ -418,7 +425,14 @@ async def process_batch_testing(
418
  input_type="text",
419
  comment_input="",
420
  provider=provider,
421
- model_name=model_name
 
 
 
 
 
 
 
422
  )
423
 
424
  # Store full JSON result
@@ -453,6 +467,7 @@ async def process_batch_testing(
453
  success_msg = f"✅ **Пакетне тестування завершено!**\n\n"
454
  success_msg += f"**Оброблено рядків:** {total_rows}\n"
455
  success_msg += f"**Модель:** {model_name}\n"
 
456
  success_msg += f"**Результати збережено в:** {output_path}\n\n"
457
  success_msg += f"**Нова колонка:** {result_column_name}\n"
458
 
@@ -886,15 +901,67 @@ def create_gradio_interface() -> gr.Blocks:
886
  choices=_available_providers,
887
  value=_default_provider,
888
  label="Провайдер AI",
 
889
  scale=1
890
  )
891
  batch_model_dropdown = gr.Dropdown(
892
  choices=_gen_models,
893
  value=_default_gen_model,
894
  label="Модель генерації",
895
- scale=1
 
896
  )
897
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
898
  delay_slider = gr.Slider(
899
  minimum=0,
900
  maximum=10,
@@ -937,7 +1004,14 @@ def create_gradio_interface() -> gr.Blocks:
937
  )
938
 
939
  download_results_file = gr.File(
940
- label="📥 Завантажити результати",
 
 
 
 
 
 
 
941
  visible=False
942
  )
943
 
@@ -991,7 +1065,7 @@ def create_gradio_interface() -> gr.Blocks:
991
  outputs=[batch_model_dropdown]
992
  )
993
 
994
- # thinking mode settings
995
  generation_provider_dropdown.change(
996
  fn=update_thinking_visibility,
997
  inputs=[generation_provider_dropdown],
@@ -1004,6 +1078,19 @@ def create_gradio_interface() -> gr.Blocks:
1004
  outputs=[thinking_type_dropdown, thinking_level_dropdown, thinking_budget_slider]
1005
  )
1006
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1007
  # generation and analysis
1008
  generate_position_button.click(
1009
  fn=lambda: (
@@ -1124,19 +1211,42 @@ def create_gradio_interface() -> gr.Blocks:
1124
  outputs=[start_batch_button]
1125
  )
1126
 
 
 
 
1127
  start_batch_button.click(
 
 
 
 
 
 
 
 
 
1128
  fn=process_batch_testing,
1129
  inputs=[
1130
  batch_df_state,
1131
  batch_provider_dropdown,
1132
  batch_model_dropdown,
1133
- delay_slider
 
 
 
 
 
 
 
1134
  ],
1135
- outputs=[batch_output, download_results_file]
1136
  ).then(
1137
- fn=lambda output_path: gr.update(visible=output_path is not None, value=output_path),
1138
- inputs=[download_results_file],
1139
- outputs=[download_results_file]
 
 
 
 
1140
  )
1141
 
1142
  # Removed app.load call to avoid startup race condition with session state
 
389
  provider: str,
390
  model_name: str,
391
  delay_seconds: float = 1.0,
392
+ thinking_enabled: bool = False,
393
+ thinking_type: str = "Adaptive",
394
+ thinking_level: str = "medium",
395
+ openai_verbosity: str = "medium",
396
+ thinking_budget: int = 10000,
397
+ temperature: float = 0.5,
398
+ max_tokens: int = 4000,
399
  progress=gr.Progress()
400
  ) -> Tuple[str, Optional[str]]:
401
  """Process batch testing of legal position generation."""
 
425
  input_type="text",
426
  comment_input="",
427
  provider=provider,
428
+ model_name=model_name,
429
+ thinking_enabled=thinking_enabled,
430
+ thinking_type=thinking_type,
431
+ thinking_level=thinking_level,
432
+ openai_verbosity=openai_verbosity,
433
+ thinking_budget=thinking_budget,
434
+ temperature=temperature,
435
+ max_tokens=max_tokens
436
  )
437
 
438
  # Store full JSON result
 
467
  success_msg = f"✅ **Пакетне тестування завершено!**\n\n"
468
  success_msg += f"**Оброблено рядків:** {total_rows}\n"
469
  success_msg += f"**Модель:** {model_name}\n"
470
+ success_msg += f"**Температура:** {temperature} | **Max Tokens:** {max_tokens}\n"
471
  success_msg += f"**Результати збережено в:** {output_path}\n\n"
472
  success_msg += f"**Нова колонка:** {result_column_name}\n"
473
 
 
901
  choices=_available_providers,
902
  value=_default_provider,
903
  label="Провайдер AI",
904
+ container=False,
905
  scale=1
906
  )
907
  batch_model_dropdown = gr.Dropdown(
908
  choices=_gen_models,
909
  value=_default_gen_model,
910
  label="Модель генерації",
911
+ container=False,
912
+ scale=2
913
  )
914
 
915
+ # Advanced Settings Accordion (mirrors Generation tab)
916
+ with gr.Accordion("⚙️ Додаткові параметри", open=False) as batch_thinking_accordion:
917
+ with gr.Row():
918
+ batch_temp_slider = gr.Slider(
919
+ minimum=0.0,
920
+ maximum=2.0,
921
+ value=0.5,
922
+ step=0.1,
923
+ label="Температура генерації (креативність)"
924
+ )
925
+ batch_max_tokens_slider = gr.Slider(
926
+ minimum=512,
927
+ maximum=32768,
928
+ value=4000,
929
+ step=512,
930
+ label="Max Tokens (ліміт відповіді)"
931
+ )
932
+ batch_thinking_enabled_checkbox = gr.Checkbox(
933
+ label="Увімкнути режим Thinking (глибокий аналіз)",
934
+ value=False,
935
+ info="Активує розширений ланцюг міркувань (Gemini 3+, Claude 4.5/4.6)"
936
+ )
937
+ with gr.Row():
938
+ batch_thinking_type_dropdown = gr.Dropdown(
939
+ choices=["Adaptive", "Enabled"],
940
+ value="Adaptive",
941
+ label="Тип Thinking (Claude)",
942
+ interactive=False
943
+ )
944
+ batch_thinking_level_dropdown = gr.Dropdown(
945
+ choices=["none", "low", "medium", "high", "xhigh"],
946
+ value="medium",
947
+ label="Рівень Thinking (OpenAI/Gemini)",
948
+ interactive=False
949
+ )
950
+ batch_openai_verbosity_dropdown = gr.Dropdown(
951
+ choices=["low", "medium", "high"],
952
+ value="medium",
953
+ label="Verbosity (OpenAI GPT-5)",
954
+ interactive=True
955
+ )
956
+ batch_thinking_budget_slider = gr.Slider(
957
+ minimum=1024,
958
+ maximum=32000,
959
+ value=10000,
960
+ step=1024,
961
+ label="Бюджет токенів (Claude 4.5)",
962
+ interactive=False
963
+ )
964
+
965
  delay_slider = gr.Slider(
966
  minimum=0,
967
  maximum=10,
 
1004
  )
1005
 
1006
  download_results_file = gr.File(
1007
+ label="📥 Завантажити результати (CSV)",
1008
+ visible=False,
1009
+ interactive=False
1010
+ )
1011
+
1012
+ download_results_btn = gr.DownloadButton(
1013
+ label="⬇️ Вигрузити результати",
1014
+ variant="secondary",
1015
  visible=False
1016
  )
1017
 
 
1065
  outputs=[batch_model_dropdown]
1066
  )
1067
 
1068
+ # thinking mode settings — Generation tab
1069
  generation_provider_dropdown.change(
1070
  fn=update_thinking_visibility,
1071
  inputs=[generation_provider_dropdown],
 
1078
  outputs=[thinking_type_dropdown, thinking_level_dropdown, thinking_budget_slider]
1079
  )
1080
 
1081
+ # thinking mode settings — Batch Testing tab
1082
+ batch_provider_dropdown.change(
1083
+ fn=update_thinking_visibility,
1084
+ inputs=[batch_provider_dropdown],
1085
+ outputs=[batch_thinking_accordion]
1086
+ )
1087
+
1088
+ batch_thinking_enabled_checkbox.change(
1089
+ fn=update_thinking_level_interactive,
1090
+ inputs=[batch_thinking_enabled_checkbox],
1091
+ outputs=[batch_thinking_type_dropdown, batch_thinking_level_dropdown, batch_thinking_budget_slider]
1092
+ )
1093
+
1094
  # generation and analysis
1095
  generate_position_button.click(
1096
  fn=lambda: (
 
1211
  outputs=[start_batch_button]
1212
  )
1213
 
1214
+ # Internal state to keep the output file path
1215
+ batch_result_path_state = gr.State()
1216
+
1217
  start_batch_button.click(
1218
+ fn=lambda: (
1219
+ gr.update(value="⏳ **Пакетне тестування запущено...**\n\nОбробка рядків. Зачекайте, будь ласка."),
1220
+ gr.update(interactive=False),
1221
+ gr.update(visible=False),
1222
+ gr.update(visible=False)
1223
+ ),
1224
+ inputs=None,
1225
+ outputs=[batch_output, start_batch_button, download_results_file, download_results_btn]
1226
+ ).then(
1227
  fn=process_batch_testing,
1228
  inputs=[
1229
  batch_df_state,
1230
  batch_provider_dropdown,
1231
  batch_model_dropdown,
1232
+ delay_slider,
1233
+ batch_thinking_enabled_checkbox,
1234
+ batch_thinking_type_dropdown,
1235
+ batch_thinking_level_dropdown,
1236
+ batch_openai_verbosity_dropdown,
1237
+ batch_thinking_budget_slider,
1238
+ batch_temp_slider,
1239
+ batch_max_tokens_slider
1240
  ],
1241
+ outputs=[batch_output, batch_result_path_state]
1242
  ).then(
1243
+ fn=lambda output_path: (
1244
+ gr.update(interactive=True),
1245
+ gr.update(visible=output_path is not None, value=output_path),
1246
+ gr.update(visible=output_path is not None, value=output_path)
1247
+ ),
1248
+ inputs=[batch_result_path_state],
1249
+ outputs=[start_batch_button, download_results_file, download_results_btn]
1250
  )
1251
 
1252
  # Removed app.load call to avoid startup race condition with session state
requirements.txt CHANGED
@@ -18,3 +18,4 @@ pydantic>=2.0.0
18
  pydantic-settings
19
  huggingface-hub>=0.23.0
20
  openpyxl
 
 
18
  pydantic-settings
19
  huggingface-hub>=0.23.0
20
  openpyxl
21
+ gradio