from __future__ import annotations
import json, time
from pathlib import Path
from scripts.kling_tasks import create_omni_video, create_text_to_video, create_image_to_video, create_reference_to_video
from scripts.builders import build_omni_first_frame_payload, build_text2video_payload, build_image2video_payload, build_reference2video_payload
from scripts.kling_client import KlingClient, KlingClientError
from configs.kling import KlingConfig

# Resolve project-relative locations for results output and probe assets.
ROOT = Path(__file__).resolve().parents[1]
OUT = ROOT / 'outputs' / 'phase3_expanded_minset_results.json'
ASSETS = ROOT / 'samples' / 'kling-api' / 'probe-assets'
ASSETS.mkdir(parents=True, exist_ok=True)

# Guarantee a deterministic 512x512 probe image on disk for image-based payloads.
from PIL import Image

img_path = ASSETS / 'probe_512.png'
if not img_path.exists():
    probe = Image.new('RGB', (512, 512), color=(180, 120, 90))
    probe.save(img_path)

# Asset policy: our own files are the source of truth, and actual image bytes
# go inline as raw base64 wherever an endpoint contract allows it. Omni keeps
# its explicit `image_list[].image_url` shape; non-Omni image2video keeps
# `image` / `image_tail`. Omni first-frame stays skipped for this minimal
# round unless a remote image URL is already on hand.
remote_image_url = None
remote_video_url = None

# Accumulated per-probe results, serialized to OUT at the end of the run.
runs = []

def record(name, payload, fn, endpoint):
    """Run one probe: call *fn* with *payload* and log the outcome into `runs`.

    On success the entry carries `ok=True` plus the raw `response`; on any
    failure it carries `ok=False`, the stringified error, and — for
    KlingClientError — the structured category/status/body details.
    """
    entry = {'name': name, 'endpoint': endpoint, 'payload': payload}
    try:
        # external_task_id ties the remote task back to this probe run.
        result = fn(payload, external_task_id=f'phase3-{name}-{int(time.time())}')
        entry['ok'] = True
        entry['response'] = result['response']
    except Exception as exc:
        entry['ok'] = False
        entry['error'] = str(exc)
        if isinstance(exc, KlingClientError):
            entry['category'] = exc.category
            entry['status_code'] = exc.status_code
            entry['response_body'] = exc.response_body
    runs.append(entry)

# Shared prompt for every probe in this minimal round.
PROMPT = 'A calm woman looks at the camera and slowly smiles, natural motion, clean lighting.'

# 1) Omni baseline only if a remote image URL exists (Omni requires image_url).
if remote_image_url:
    payload = build_omni_first_frame_payload(prompt=PROMPT, image_url=remote_image_url, duration='5', mode='std', aspect_ratio='16:9')
    record('omni_baseline', payload, create_omni_video, 'omni')
else:
    runs.append({'name':'omni_baseline','endpoint':'omni','skipped':True,'reason':'no remote_image_url available for image_url-based Omni first-frame baseline'})

# 2) text2video minimal, no explicit model.
payload = build_text2video_payload(prompt=PROMPT, duration='5', mode='std', aspect_ratio='16:9')
record('text2video_minimal', payload, create_text_to_video, 'text2video')

# 3) image2video minimal, no explicit model; the builder converts local asset
# paths to raw base64 so the request body reflects the production
# asset-handling policy without changing endpoint shape.
payload = build_image2video_payload(image=str(img_path), prompt=PROMPT, duration='5', mode='std', aspect_ratio='16:9')
record('image2video_minimal', payload, create_image_to_video, 'image2video')

# 4) reference2video only if a remote image URL exists.
if remote_image_url:
    payload = build_reference2video_payload(image_list=[{'image_url': remote_image_url}], prompt=PROMPT, duration='5', mode='std', aspect_ratio='16:9')
    record('reference2video_minimal', payload, create_reference_to_video, 'reference2video')
else:
    runs.append({'name':'reference2video_minimal','endpoint':'reference2video','skipped':True,'reason':'no remote_image_url available for reference image_list baseline'})

# Persist results. encoding='utf-8' is required: ensure_ascii=False can emit
# non-ASCII characters, and Path.write_text otherwise uses the locale's
# preferred encoding, which may raise UnicodeEncodeError (e.g. cp1252 on
# Windows).
OUT.write_text(json.dumps({'runs': runs}, ensure_ascii=False, indent=2), encoding='utf-8')
print(str(OUT))
