| |
| import argparse |
| import os |
| import sys |
| import time |
|
|
| from google import genai |
|
|
|
|
# Prompt sent with every uploaded video. It deliberately asks about the timing
# of the loudest sound relative to the visual impact, so the answer should
# change noticeably between an original clip and a muted (volume=0) copy.
DEFAULT_PROMPT: str = (
    "Please describe the exact sequence of events in this video. "
    "Specifically, when does the loudest sound occur in relation to the visual impact?"
)
|
|
|
|
def wait_until_file_ready(client: genai.Client, file_name: str, timeout_sec: int = 300) -> None:
    """Block until an uploaded file is processed and ACTIVE.

    Polls the Files API every 2 seconds until the file's state becomes
    ``ACTIVE``.

    Args:
        client: Authenticated GenAI client.
        file_name: Server-side name of the uploaded file.
        timeout_sec: Maximum number of seconds to keep polling.

    Raises:
        RuntimeError: If the server reports the file as FAILED.
        TimeoutError: If the file is still not ACTIVE after ``timeout_sec``.
    """
    deadline = time.time() + timeout_sec
    while True:
        file_info = client.files.get(name=file_name)
        # The state may be an enum-like object with a .name, or something
        # string-convertible; fall back from one to the other.
        state_obj = getattr(file_info, "state", None)
        state = getattr(state_obj, "name", None) or str(getattr(file_info, "state", ""))

        if state == "ACTIVE":
            return
        if state == "FAILED":
            raise RuntimeError(f"File processing failed: {file_name}")
        if time.time() > deadline:
            raise TimeoutError(f"Timed out waiting for file to become ACTIVE: {file_name}")

        time.sleep(2)
|
|
|
|
def analyze_one_video(client: genai.Client, model_name: str, video_path: str, prompt: str) -> str:
    """Upload one video, wait for processing, and return the model's answer.

    Args:
        client: Authenticated GenAI client.
        model_name: Gemini model to query.
        video_path: Local filesystem path to the video.
        prompt: Text prompt sent alongside the video.

    Returns:
        The model's response text, stripped of surrounding whitespace
        (empty string if the model returned no text).

    Raises:
        FileNotFoundError: If ``video_path`` does not exist.
    """
    if not os.path.exists(video_path):
        raise FileNotFoundError(f"Video file not found: {video_path}")

    # Upload, then block until the Files API marks the video ACTIVE —
    # generate_content cannot reference a file that is still processing.
    handle = client.files.upload(file=video_path)
    wait_until_file_ready(client, handle.name)

    result = client.models.generate_content(
        model=model_name,
        contents=[handle, prompt],
    )
    text = result.text if result.text is not None else ""
    return text.strip()
|
|
|
|
def main() -> None:
    """CLI entry point: run the same prompt against two videos and print both answers.

    Reads the API key from the ``GEMINI_API_KEY`` environment variable and
    exits with status 1 if it is missing. Output order and wording are
    unchanged from the original script.
    """
    parser = argparse.ArgumentParser(description="Compare Gemini responses on two videos.")
    parser.add_argument("--video-a", required=True, help="Path to first video (e.g., original).")
    parser.add_argument("--video-b", required=True, help="Path to second video (e.g., volume=0).")
    parser.add_argument("--label-a", default="original", help="Display label for video A.")
    parser.add_argument("--label-b", default="volume0", help="Display label for video B.")
    parser.add_argument("--model", default="gemini-3.1-pro", help="Gemini model name.")
    parser.add_argument("--prompt", default=DEFAULT_PROMPT, help="Prompt to send.")
    args = parser.parse_args()

    api_key = os.environ.get("GEMINI_API_KEY")
    if not api_key:
        print("Error: GEMINI_API_KEY is not set.", file=sys.stderr)
        sys.exit(1)

    client = genai.Client(api_key=api_key)

    print(f"Model: {args.model}")
    print(f"Prompt: {args.prompt}\n")

    # DRY fix: the original duplicated the analyze/print sequence verbatim for
    # video A and video B; one loop guarantees both get identical handling.
    # The leading "\n" before the second "Analyzing" line preserves the
    # original output spacing exactly.
    for idx, (label, path) in enumerate(
        ((args.label_a, args.video_a), (args.label_b, args.video_b))
    ):
        prefix = "\n" if idx else ""
        print(f"{prefix}Analyzing {label}: {path}")
        output = analyze_one_video(client, args.model, path, args.prompt)
        print(f"\n===== {label} =====")
        print(output if output else "<empty response>")

    print("\n===== Quick check =====")
    print(
        "- If timing/detail around loudest sound changes significantly between the two videos, "
        "audio is likely being used.\n"
        "- If responses are nearly identical, audio influence is weak or absent."
    )
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|