Phase 3: Department Interpretation — Directing + Cinematography per scene

Layer 4 implementation:
- Per-scene directing: scene_objective, audience_takeaway, pacing, dramatic
  beats, subtext, continuity considerations
- Per-scene cinematography: camera style, lens, movement, framing, DoF,
  color palette, visual emphasis, continuity considerations
- All interpretation grounded in L2 scene data + L3 Production Bible
- Bible entries passed as context per scene (matching characters + location)
- Validator: flags empty fields, broken scene refs, unknown bible refs, uncertain values
- Per-scene versioned output + combined department_interpretation_v1.json
- CLI: --phase 3, --scene N for single-scene re-run

Tested on the_last_backup: 12/12 scenes valid, 0 failures, 5 warnings
(false positives from prop names in all-caps like BLACK PORTABLE SSD)
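
For reference, a minimal sketch of how the single-scene re-run path might be driven directly from Python; the scene number is illustrative, and the project name matches the test project above.

# Hypothetical single-scene re-run of Phase 3; run.py wires the same arguments
# from the --project / --phase / --scene flags described above.
from src.departments.runner import run_phase3

result = run_phase3(
    project_name="the_last_backup",  # test project from this commit
    model="qwen3:14b",
    backend="ollama",
    scene_filter=5,                  # illustrative scene number (--scene 5)
)
if not result.success:
    print(f"PHASE 3 FAILED: {result.stop_reason}")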

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
profit 2026-04-06 17:31:23 -07:00
parent 17e410751c
commit 3b5ef3596a
7 changed files with 564 additions and 40 deletions

run.py

@@ -14,7 +14,7 @@ def main():
     parser = argparse.ArgumentParser(description="AI Movie Production Pipeline")
     parser.add_argument("--script", type=str, help="Path to .fountain script file (Phase 1)")
     parser.add_argument("--project", type=str, help="Project name (determines output directory)")
-    parser.add_argument("--phase", type=int, default=None, choices=[1, 2], help="Run specific phase only (1=ingestion+extraction, 2=bible)")
+    parser.add_argument("--phase", type=int, default=None, choices=[1, 2, 3], help="Run specific phase (1=extraction, 2=bible, 3=departments)")
     parser.add_argument("--model", type=str, default="qwen3:14b", help="Model ID (default: qwen3:14b)")
     parser.add_argument("--backend", type=str, default="ollama", choices=["ollama", "anthropic"], help="AI backend (default: ollama)")
     parser.add_argument("--ollama-url", type=str, default="http://localhost:11434", help="Ollama server URL")
@@ -40,6 +40,7 @@ def main():
     run_phase1_flag = args.phase is None or args.phase == 1
     run_phase2_flag = args.phase is None or args.phase == 2
+    run_phase3_flag = args.phase is None or args.phase == 3

     # Phase 1: Script Ingestion + Understanding
     if run_phase1_flag:
@@ -88,6 +89,25 @@ def main():
             print(f"\nPHASE 2 FAILED: {bible_result.stop_reason}")
             sys.exit(1)

+    # Phase 3: Department Interpretation
+    if run_phase3_flag:
+        from src.departments.runner import run_phase3
+        dept_result = run_phase3(
+            project_name=args.project,
+            model=args.model,
+            backend=args.backend,
+            ollama_url=args.ollama_url,
+            api_key=api_key,
+            output_dir=args.output_dir,
+            scene_filter=args.scene,
+            dry_run=args.dry_run,
+        )
+        if not dept_result.success:
+            print(f"\nPHASE 3 FAILED: {dept_result.stop_reason}")
+            sys.exit(1)
+
     print("\nPIPELINE COMPLETE")
     sys.exit(0)

src/departments/generator.py

@@ -0,0 +1,114 @@
"""Layer 4 department interpretation generator.
Per-scene, sends scene data + relevant bible entries to AI,
receives directing + cinematography interpretations.
"""
import json
import requests
from dataclasses import dataclass


@dataclass
class DepartmentResult:
    raw_data: dict
    token_usage: dict


class DepartmentGenerationError(Exception):
    pass


def generate_department_interpretation(
    scene: dict,
    character_entries: list[dict],
    location_entry: dict | None,
    contract_path: str,
    model: str = "qwen3:14b",
    backend: str = "ollama",
    ollama_url: str = "http://localhost:11434",
    api_key: str = "",
) -> DepartmentResult:
    """Generate directing + cinematography interpretation for a single scene."""
    with open(contract_path, "r", encoding="utf-8") as f:
        contract = json.load(f)

    scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
    characters_json = json.dumps(character_entries, indent=2, ensure_ascii=False)
    location_json = json.dumps(location_entry, indent=2, ensure_ascii=False) if location_entry else '{"note": "No matching location bible entry found"}'

    user_prompt = (
        contract["user_prompt_template"]
        .replace("{{scene_json}}", scene_json)
        .replace("{{characters_json}}", characters_json)
        .replace("{{location_json}}", location_json)
    )

    if backend == "ollama":
        text, usage = _call_ollama(model, contract["system_prompt"], user_prompt, contract["max_output_tokens"], ollama_url)
    elif backend == "anthropic":
        text, usage = _call_anthropic(model, contract["system_prompt"], user_prompt, contract["max_output_tokens"], api_key)
    else:
        raise DepartmentGenerationError(f"Unknown backend: {backend}")

    try:
        parsed = json.loads(text)
    except json.JSONDecodeError as e:
        cleaned = _extract_json(text)
        if cleaned:
            try:
                parsed = json.loads(cleaned)
            except json.JSONDecodeError:
                raise DepartmentGenerationError(f"Not valid JSON: {e}\nResponse:\n{text[:500]}") from e
        else:
            raise DepartmentGenerationError(f"Not valid JSON: {e}\nResponse:\n{text[:500]}") from e

    return DepartmentResult(raw_data=parsed, token_usage=usage)


def _call_ollama(model, system_prompt, user_prompt, max_tokens, ollama_url):
    payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        "stream": False,
        "options": {"temperature": 0, "num_predict": max_tokens, "num_ctx": 32768},
        "format": "json",
    }
    try:
        resp = requests.post(f"{ollama_url}/api/chat", json=payload, timeout=600)
        resp.raise_for_status()
    except requests.RequestException as e:
        raise DepartmentGenerationError(f"Ollama API failed: {e}") from e
    data = resp.json()
    text = data.get("message", {}).get("content", "")
    usage = {"input": data.get("prompt_eval_count", 0), "output": data.get("eval_count", 0)}
    if not text:
        raise DepartmentGenerationError("Empty response from Ollama")
    return text, usage


def _call_anthropic(model, system_prompt, user_prompt, max_tokens, api_key):
    from anthropic import Anthropic

    client = Anthropic(api_key=api_key)
    response = client.messages.create(
        model=model, max_tokens=max_tokens, temperature=0,
        system=system_prompt, messages=[{"role": "user", "content": user_prompt}],
    )
    return response.content[0].text, {
        "input": response.usage.input_tokens, "output": response.usage.output_tokens,
    }


def _extract_json(text):
    if "```json" in text:
        s = text.index("```json") + 7
        e = text.index("```", s)
        return text[s:e].strip()
    if "```" in text:
        s = text.index("```") + 3
        e = text.index("```", s)
        return text[s:e].strip()
    return None
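
A rough usage sketch for the generator in isolation follows; the scene dict, character entry, and names (MAYA, SERVER ROOM) are invented stand-ins for real L2/L3 data, and it assumes a local Ollama server with the default model and the repo root as working directory.

# Sketch only: hypothetical scene and bible data, default Ollama backend assumed.
from src.departments.generator import generate_department_interpretation

scene = {
    "scene_number": 1,
    "location": "SERVER ROOM",        # hypothetical
    "characters_present": ["MAYA"],   # hypothetical
}
maya_entry = {"canonical_name": "MAYA", "aliases": []}

result = generate_department_interpretation(
    scene=scene,
    character_entries=[maya_entry],
    location_entry=None,  # triggers the "No matching location bible entry found" note
    contract_path="src/prompts/L4_department_interpretation_v1.json",
)
print(result.raw_data["directing"]["scene_objective"])
print(result.token_usage)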

src/departments/runner.py

@@ -0,0 +1,257 @@
"""Layer 4 runner — reads L2 scenes + L3 bible, generates department interpretations per scene."""
import hashlib
import json
import os
from dataclasses import dataclass, field

from src.departments.generator import generate_department_interpretation, DepartmentGenerationError
from src.departments.validator import validate_department_breakdown, DeptValidationWarning
from src.schemas.department import SceneDepartmentBreakdown, DepartmentInterpretation
from src.validators.schema_validator import validate, ValidationResult
from src.logging.layer_logger import LayerLogger
from src.execution.output_writer import OutputWriter
from src.execution.retry import execute_with_retry, FailureRecord
from src.execution.stop_conditions import evaluate_stop


@dataclass
class DeptPipelineResult:
    success: bool
    total_scenes: int = 0
    valid_scenes: int = 0
    flagged_scenes: int = 0
    failed_scenes: int = 0
    warnings: list[DeptValidationWarning] = field(default_factory=list)
    stop_reason: str | None = None


def run_phase3(
    project_name: str,
    model: str = "qwen3:14b",
    backend: str = "ollama",
    ollama_url: str = "http://localhost:11434",
    api_key: str = "",
    output_dir: str = "output",
    scene_filter: int | None = None,
    dry_run: bool = False,
) -> DeptPipelineResult:
    """Run Phase 3: department interpretation from L2 scenes + L3 bible."""
    logger = LayerLogger(project_name, output_dir)
    writer = OutputWriter(project_name, output_dir)
    prompts_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "src", "prompts")
    contract_path = os.path.join(prompts_dir, "L4_department_interpretation_v1.json")

    # ── LOAD INPUTS ──────────────────────────────────────────────────────
    print(f"[L4] Loading L2 scenes and L3 bible for: {project_name}")
    scenes = _load_l2_scenes(project_name, output_dir)
    if not scenes:
        return DeptPipelineResult(success=False, stop_reason="No L2 scene outputs found")
    bible = _load_production_bible(project_name, output_dir)
    if bible is None:
        return DeptPipelineResult(success=False, stop_reason="No L3 Production Bible found — run Phase 2 first")

    characters_by_name = _index_characters(bible.get("characters", []))
    locations_by_name = _index_locations(bible.get("locations", []))
    all_character_names = set(characters_by_name.keys())
    all_location_names = set(locations_by_name.keys())
    print(f"[L4] Loaded {len(scenes)} scenes, {len(characters_by_name)} characters, {len(locations_by_name)} locations")

    if scene_filter is not None:
        scenes = [s for s in scenes if s.get("scene_number") == scene_filter]
        if not scenes:
            return DeptPipelineResult(success=False, stop_reason=f"Scene {scene_filter} not found in L2 outputs")
        print(f"[L4] Filtered to scene {scene_filter}")

    if dry_run:
        print(f"[DRY RUN] {len(scenes)} scenes would get department interpretation.")
        return DeptPipelineResult(success=True, total_scenes=len(scenes))

    # ── PER-SCENE INTERPRETATION ─────────────────────────────────────────
    all_results: list[ValidationResult | FailureRecord] = []
    all_warnings: list[DeptValidationWarning] = []
    all_breakdowns: list[SceneDepartmentBreakdown] = []
    total_tokens = {"input": 0, "output": 0}

    for scene in scenes:
        sn = scene["scene_number"]
        print(f"[L4] Scene {sn}: interpreting...")
        run_id = logger.start("L4", scene_id=sn)
        input_hash = f"sha256:{hashlib.sha256(json.dumps(scene).encode()).hexdigest()}"

        # Gather relevant bible entries
        scene_chars = scene.get("characters_present", [])
        char_entries = _gather_character_entries(scene_chars, characters_by_name)
        loc_entry = _find_location_entry(scene.get("location", ""), locations_by_name)

        def do_interpret(data):
            return generate_department_interpretation(
                scene=data, character_entries=char_entries, location_entry=loc_entry,
                contract_path=contract_path, model=model, backend=backend,
                ollama_url=ollama_url, api_key=api_key,
            )

        result = execute_with_retry(
            fn=do_interpret, input_data=scene, layer_id="L4", scene_id=sn,
        )
        if isinstance(result, FailureRecord):
            logger.finish(run_id, input_hash, None, "failed",
                          failure_state=result.error, retry_count=len(result.attempts))
            all_results.append(result)
            print(f"[L4] Scene {sn}: FAILED after {len(result.attempts)} attempts")
            continue

        total_tokens["input"] += result.token_usage["input"]
        total_tokens["output"] += result.token_usage["output"]

        # Ensure scene number is in the raw data
        raw = result.raw_data
        if "scene" not in raw:
            raw["scene"] = sn

        # Schema validation
        v = validate(raw, SceneDepartmentBreakdown)
        all_results.append(v)
        if v.status == "failed":
            logger.finish(run_id, input_hash, None, "failed",
                          failure_state="; ".join(v.errors))
            print(f"[L4] Scene {sn}: SCHEMA FAILED — {v.errors}")
            continue
        breakdown = v.data

        # Semantic validation
        scene_warnings = validate_department_breakdown(
            breakdown, scene, all_character_names, all_location_names,
        )
        all_warnings.extend(scene_warnings)

        # Write output
        out = writer.write_named_raw("L4", f"scene_{sn:03d}_departments", raw)
        status = "FLAGGED" if v.status == "flagged" or scene_warnings else "valid"
        logger.finish(run_id, input_hash, out["hash"], v.status,
                      token_usage=result.token_usage)
        if scene_warnings:
            for w in scene_warnings:
                print(f"[L4] Scene {sn} WARNING [{w.department}]: {w.message}")
        print(f"[L4] Scene {sn}: {status}")
        all_breakdowns.append(breakdown)

    # ── STOP CONDITION CHECK ─────────────────────────────────────────────
    stop = evaluate_stop(all_results, len(scenes))
    if stop.should_stop:
        print(f"[L4] STOP CONDITION: {stop.reason}")
        return DeptPipelineResult(
            success=False, total_scenes=len(scenes),
            valid_scenes=len(all_breakdowns), stop_reason=stop.reason, warnings=all_warnings,
        )

    # ── WRITE COMBINED OUTPUT ────────────────────────────────────────────
    if all_breakdowns:
        combined = DepartmentInterpretation(breakdowns=all_breakdowns)
        writer.write_named_raw("L4", "department_interpretation", combined.model_dump())
        print(f"[L4] Combined interpretation written")

    valid = sum(1 for r in all_results if isinstance(r, ValidationResult) and r.status in ("valid", "flagged"))
    failed = sum(1 for r in all_results if isinstance(r, FailureRecord) or (isinstance(r, ValidationResult) and r.status == "failed"))
    print(f"\n[DONE] Scenes: {valid} valid, {failed} failed")
    print(f"[DONE] Warnings: {len(all_warnings)}")
    print(f"[DONE] Tokens: {total_tokens['input']} in / {total_tokens['output']} out")
    return DeptPipelineResult(
        success=True, total_scenes=len(scenes),
        valid_scenes=valid, failed_scenes=failed, warnings=all_warnings,
    )


def _load_l2_scenes(project_name, output_dir):
    l2_dir = os.path.join(output_dir, project_name, "L2")
    latest_path = os.path.join(l2_dir, "latest.json")
    if not os.path.exists(latest_path):
        return []
    with open(latest_path) as f:
        manifest = json.load(f)
    scenes = []
    for key, ver in sorted(manifest.items(), key=lambda x: int(x[0]) if x[0].isdigit() else 0):
        if not key.isdigit():
            continue
        fp = os.path.join(l2_dir, f"scene_{int(key):03d}_v{ver}.json")
        if os.path.exists(fp):
            with open(fp) as f:
                scenes.append(json.load(f))
    return scenes


def _load_production_bible(project_name, output_dir):
    l3_dir = os.path.join(output_dir, project_name, "L3")
    latest_path = os.path.join(l3_dir, "latest.json")
    if not os.path.exists(latest_path):
        return None
    with open(latest_path) as f:
        manifest = json.load(f)
    ver = manifest.get("production_bible")
    if not ver:
        return None
    fp = os.path.join(l3_dir, f"production_bible_v{ver}.json")
    if not os.path.exists(fp):
        return None
    with open(fp) as f:
        return json.load(f)


def _index_characters(characters):
    idx = {}
    for c in characters:
        name = c.get("canonical_name", "")
        idx[name] = c
        for alias in c.get("aliases", []):
            idx[alias] = c
    return idx


def _index_locations(locations):
    idx = {}
    for loc in locations:
        name = loc.get("canonical_name", "")
        idx[name] = loc
        for v in loc.get("variants", []):
            idx[v] = loc
    return idx


def _gather_character_entries(scene_chars, characters_by_name):
    entries = []
    seen = set()
    for name in scene_chars:
        # Try exact match, then partial
        entry = characters_by_name.get(name)
        if not entry:
            for bible_name, bible_entry in characters_by_name.items():
                if name.upper() in bible_name.upper() or bible_name.upper() in name.upper():
                    entry = bible_entry
                    break
        if entry and entry.get("canonical_name") not in seen:
            seen.add(entry["canonical_name"])
            entries.append(entry)
    return entries


def _find_location_entry(scene_location, locations_by_name):
    entry = locations_by_name.get(scene_location)
    if entry:
        return entry
    for name, loc in locations_by_name.items():
        if scene_location.upper() in name.upper() or name.upper() in scene_location.upper():
            return loc
    return None
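
The manifest shape that the two loaders expect is not shown in this commit; inferred from the code above, it would look roughly like the sketch below (values illustrative, the real manifests are written by OutputWriter elsewhere in the pipeline).

# Inferred, not verified: L2 latest.json maps scene numbers (as strings) to the
# newest version of each per-scene file; L3 latest.json maps the bible name to
# its newest version.
l2_latest = {"1": 2, "2": 1, "12": 1}  # -> scene_001_v2.json, scene_002_v1.json, ...
l3_latest = {"production_bible": 3}    # -> production_bible_v3.json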

src/departments/validator.py

@ -0,0 +1,143 @@
"""Layer 4 department interpretation validator."""
from dataclasses import dataclass
from typing import Literal
from src.schemas.department import SceneDepartmentBreakdown
@dataclass
class DeptValidationWarning:
type: Literal[
"missing_field",
"broken_scene_ref",
"broken_bible_ref",
"empty_department",
"uncertain_value",
"continuity_conflict",
]
scene: int
department: str
message: str
def validate_department_breakdown(
breakdown: SceneDepartmentBreakdown,
scene_data: dict,
character_names: set[str],
location_names: set[str],
) -> list[DeptValidationWarning]:
"""Validate a single scene's department interpretation against source data."""
warnings: list[DeptValidationWarning] = []
sn = breakdown.scene
# Verify scene number matches
if sn != scene_data.get("scene_number"):
warnings.append(DeptValidationWarning(
type="broken_scene_ref", scene=sn, department="general",
message=f"Breakdown scene {sn} doesn't match scene data scene_number {scene_data.get('scene_number')}",
))
# ── DIRECTING ────────────────────────────────────────────────────────
d = breakdown.directing
# Check for empty fields
for field_name in ["scene_objective", "audience_takeaway", "pacing_notes", "subtext_notes", "continuity_considerations"]:
val = getattr(d, field_name)
if not val or not val.strip():
warnings.append(DeptValidationWarning(
type="empty_department", scene=sn, department="directing",
message=f"Field '{field_name}' is empty",
))
if val and "UNCERTAIN" in val:
warnings.append(DeptValidationWarning(
type="uncertain_value", scene=sn, department="directing",
message=f"Field '{field_name}' contains UNCERTAIN",
))
if not d.key_dramatic_beats:
warnings.append(DeptValidationWarning(
type="empty_department", scene=sn, department="directing",
message="key_dramatic_beats is empty — every scene should have at least one beat",
))
# ── CINEMATOGRAPHY ───────────────────────────────────────────────────
c = breakdown.cinematography
for field_name in ["suggested_camera_style", "lens_character", "depth_of_field_intent",
"color_palette_direction", "visual_emphasis", "continuity_considerations"]:
val = getattr(c, field_name)
if not val or not val.strip():
warnings.append(DeptValidationWarning(
type="empty_department", scene=sn, department="cinematography",
message=f"Field '{field_name}' is empty",
))
if val and "UNCERTAIN" in val:
warnings.append(DeptValidationWarning(
type="uncertain_value", scene=sn, department="cinematography",
message=f"Field '{field_name}' contains UNCERTAIN",
))
if not c.movement_patterns:
warnings.append(DeptValidationWarning(
type="empty_department", scene=sn, department="cinematography",
message="movement_patterns is empty",
))
if not c.framing_priorities:
warnings.append(DeptValidationWarning(
type="empty_department", scene=sn, department="cinematography",
message="framing_priorities is empty",
))
# ── BIBLE REFERENCE CHECKS ───────────────────────────────────────────
    # Check that any character names mentioned in directing text exist in bible
    # (join with spaces so the last field and the first beat don't run together)
    all_text = f"{d.scene_objective} {d.audience_takeaway} {d.subtext_notes} {d.continuity_considerations} "
    all_text += " ".join(d.key_dramatic_beats)
    _check_entity_refs(all_text, character_names, location_names, sn, "directing", warnings)

    cine_text = f"{c.visual_emphasis} {c.continuity_considerations} "
    cine_text += " ".join(c.framing_priorities)
    _check_entity_refs(cine_text, character_names, location_names, sn, "cinematography", warnings)

    return warnings


def _check_entity_refs(
    text: str,
    character_names: set[str],
    location_names: set[str],
    scene: int,
    department: str,
    warnings: list[DeptValidationWarning],
):
    """Soft check: if a proper-noun-like word appears that isn't a known entity, flag it.

    This is a lightweight heuristic, not exhaustive. It catches obvious hallucinated names.
    """
    # We check if any word that looks like a character name (all-caps, 3+ letters)
    # appears in the text but isn't in our bible
    import re
    all_caps_words = set(re.findall(r'\b[A-Z]{3,}\b', text))
    # Filter out common English words that happen to be all-caps
    common_caps = {
        "THE", "AND", "FOR", "BUT", "NOT", "WITH", "FROM", "THIS", "THAT", "WILL",
        "HAS", "HIS", "HER", "THEY", "THEM", "THEIR", "BEEN", "HAVE", "EACH",
        "WHICH", "WHEN", "WHAT", "ABOUT", "INTO", "OVER", "AFTER", "ALSO",
        "UNCERTAIN", "UNKNOWN", "INTERIOR", "EXTERIOR", "NIGHT", "DAY", "MORNING",
        "AFTERNOON", "DUSK", "DAWN", "BOTH",
    }
    known_upper = {n.upper() for n in character_names} | {n.upper() for n in location_names}
    # Also add individual words from multi-word names
    for n in character_names | location_names:
        for part in n.upper().split():
            known_upper.add(part)
    suspicious = all_caps_words - common_caps - known_upper
    for word in suspicious:
        if len(word) >= 4:  # only flag longer words to reduce noise
            warnings.append(DeptValidationWarning(
                type="broken_bible_ref", scene=scene, department=department,
                message=f"All-caps name '{word}' not found in Production Bible — possible hallucination",
            ))
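
A quick sketch of how this heuristic produces the false positives noted in the commit message; the bible names and scene number below are hypothetical.

from src.departments.validator import DeptValidationWarning, _check_entity_refs

warnings: list[DeptValidationWarning] = []
_check_entity_refs(
    text="MAYA copies the files onto the BLACK PORTABLE SSD before dawn.",
    character_names={"MAYA"},          # hypothetical bible entries
    location_names={"SERVER ROOM"},
    scene=7,
    department="directing",
    warnings=warnings,
)
# "MAYA", "SERVER", "ROOM" are known; "BLACK" and "PORTABLE" (all-caps, 4+
# letters, not in the bible) get flagged, while "SSD" is skipped by the length
# filter. These are exactly the prop-name false positives reported above.
for w in warnings:
    print(w.type, w.message)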

src/prompts/L4_department_interpretation_v1.json

@@ -0,0 +1,18 @@
{
  "contract_id": "L4_department_interpretation_v1",
  "layer": "L4",
  "version": 1,
  "purpose": "Generate Directing and Cinematography department interpretations for a single scene",
  "required_output_schema": "SceneDepartmentBreakdown",
  "forbidden_behaviors": [
    "Do not invent character traits not present in the Character Bible or scene data",
    "Do not invent location details not present in the Location Bible or scene data",
    "Do not invent props or events not supported by the scene extraction",
    "Do not override canonical character or location identity from the Production Bible",
    "Do not fabricate emotional states not grounded in source data",
    "Do not guess — if information is uncertain, mark it as UNCERTAIN rather than inventing details"
  ],
  "system_prompt": "You are a film department interpretation engine. You receive:\n1. A single scene extraction (JSON) from a screenplay\n2. The canonical Character Bible entries for characters in this scene\n3. The canonical Location Bible entry for this scene's location\n\nYou must produce department interpretation for exactly two departments: Directing and Cinematography.\n\nReturn a JSON object with these exact keys:\n- scene (int): the scene number\n- directing (object): directing interpretation\n- cinematography (object): cinematography interpretation\n\nDirecting object MUST contain:\n- scene_objective (string): What this scene must accomplish narratively\n- audience_takeaway (string): What the audience should feel/understand by the end\n- pacing_notes (string): Rhythm and tempo guidance (slow burn, rapid, building, etc.)\n- key_dramatic_beats (string[]): The pivotal moments in the scene, in order\n- subtext_notes (string): What is happening beneath the surface dialogue/action\n- continuity_considerations (string): What must be maintained from prior scenes or set up for future scenes\n\nCinematography object MUST contain:\n- suggested_camera_style (string): Handheld, steadicam, static, etc.\n- lens_character (string): Wide, intimate, compressed, etc.\n- movement_patterns (string[]): Camera movements through the scene\n- framing_priorities (string[]): What the frame should emphasize at key moments\n- depth_of_field_intent (string): Deep focus, shallow, rack focus, etc.\n- color_palette_direction (string): Color and tone guidance\n- visual_emphasis (string): The dominant visual element or composition strategy\n- continuity_considerations (string): Visual continuity with adjacent scenes\n\nRULES:\n- All interpretation must be grounded in the scene data + Production Bible\n- Character identity comes from the Character Bible — do not redefine\n- Location identity and visual environment come from the Location Bible — do not redefine\n- Interpretation is allowed but must be traceable to source evidence\n- If uncertain, write UNCERTAIN rather than guessing\n- Return ONLY the JSON object, no additional text",
  "user_prompt_template": "Generate department interpretation for this scene.\n\nSCENE DATA:\n{{scene_json}}\n\nCHARACTER BIBLE ENTRIES (for characters in this scene):\n{{characters_json}}\n\nLOCATION BIBLE ENTRY:\n{{location_json}}",
  "max_output_tokens": 4000
}

src/schemas/department.py

@@ -1,61 +1,33 @@
-"""Department interpretation schemas — Layer 4. Built in Phase 3, defined now for contract stability."""
+"""Department interpretation schemas — Layer 4."""
 from pydantic import BaseModel


 class DirectingOutput(BaseModel):
     scene_objective: str
+    audience_takeaway: str
     pacing_notes: str
     key_dramatic_beats: list[str]
     subtext_notes: str
+    continuity_considerations: str


 class CinematographyOutput(BaseModel):
-    camera_style: str
+    suggested_camera_style: str
     lens_character: str
     movement_patterns: list[str]
+    framing_priorities: list[str]
     depth_of_field_intent: str
     color_palette_direction: str
+    visual_emphasis: str
+    continuity_considerations: str


-class LightingOutput(BaseModel):
-    key_light_direction: str
-    practical_sources: list[str]
-    mood_notes: str
-    time_of_day_requirements: str
-    contrast_ratio_intent: str
-
-
-class ProductionDesignOutput(BaseModel):
-    required_set_elements: list[str]
-    set_dressing_priorities: list[str]
-    color_texture_palette: str
-    era_period_notes: str
-    spatial_blocking_requirements: str
-
-
-class WardrobeOutput(BaseModel):
-    character_wardrobe: list[dict]
-    costume_condition: str
-    thematic_notes: str
-    changes_from_previous: str
-
-
-class PerformanceOutput(BaseModel):
-    character_states: list[dict]
-    key_shifts: list[str]
-    subtext_notes: str
-    physical_behavior_cues: list[str]
-
-
-class StoryboardOutput(BaseModel):
-    character_positions: list[str]
-    key_composition_frames: list[str]
-    spatial_relationships: list[str]
-    entry_exit_patterns: list[str]
-
-
 class SceneDepartmentBreakdown(BaseModel):
     scene: int
     directing: DirectingOutput
     cinematography: CinematographyOutput
+
+
+class DepartmentInterpretation(BaseModel):
+    breakdowns: list[SceneDepartmentBreakdown]
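
For orientation, a hedged construction of the updated models; every field value below is invented for illustration and is not taken from the tested project.

from src.schemas.department import (
    DirectingOutput, CinematographyOutput, SceneDepartmentBreakdown, DepartmentInterpretation,
)

breakdown = SceneDepartmentBreakdown(
    scene=1,
    directing=DirectingOutput(
        scene_objective="Establish what is at stake if the backup fails.",
        audience_takeaway="The protagonist is out of time.",
        pacing_notes="Slow burn that tightens toward the end.",
        key_dramatic_beats=["Discovery of the corrupted drive"],
        subtext_notes="Control is slipping away.",
        continuity_considerations="The drive must match its state in the prior scene.",
    ),
    cinematography=CinematographyOutput(
        suggested_camera_style="Handheld",
        lens_character="Intimate, slightly wide",
        movement_patterns=["Slow push-in on the monitor"],
        framing_priorities=["Hands on the keyboard", "Face in the screen glow"],
        depth_of_field_intent="Shallow",
        color_palette_direction="Cold blues with a single warm practical",
        visual_emphasis="Isolation within the frame",
        continuity_considerations="Keep the screen glow consistent with adjacent scenes.",
    ),
)
combined = DepartmentInterpretation(breakdowns=[breakdown])
print(combined.model_dump())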