#!/usr/bin/env python3
"""
SWP Integration Script — Symbol Word Protocol + Local LLM
==========================================================

This script shows the complete workflow:
  1. Send your text to the IdeaPhase SWP Encoding API
  2. Get back structured SWP-tagged output with a prompt template
  3. Feed that prompt template to your local LLM (Ollama, LM Studio, etc.)

The SWP tags act as cognitive scaffolding — your LLM reads them and
produces more structured, deterministic, compliance-aware output.

Requirements:
  pip install requests

Usage:
  python swp_integration.py

Replace YOUR_API_KEY below with your actual SWP API key (starts with swp_).
Replace IDEAPHASE_URL with your published IdeaPhase URL.
"""

import requests
import json
import sys

# ============================================================
# CONFIGURATION — Edit these values
# ============================================================

# Base URL of your published IdeaPhase deployment (no trailing slash).
IDEAPHASE_URL = "https://your-ideaphase-url.replit.app"
# SWP API key — real keys start with "swp_". The __main__ guard below
# detects this exact placeholder value and falls back to the offline demo.
SWP_API_KEY = "swp_your_api_key_here"

# Default Ollama server and model used by send_to_ollama().
OLLAMA_URL = "http://localhost:11434"
OLLAMA_MODEL = "llama3"

# Default LM Studio (OpenAI-compatible) server and model used by send_to_lm_studio().
LM_STUDIO_URL = "http://localhost:1234"
LM_STUDIO_MODEL = "local-model"

# ============================================================
# STEP 1: Encode your text with SWP
# ============================================================

def encode_with_swp(text, framework="general"):
    """
    Send raw text to the IdeaPhase SWP Encoding API.
    Returns the full response including tagged content,
    prompt template, LoRA guidance, and proof chain data.

    Args:
        text: Your raw document or idea text
        framework: Domain framework to apply. Options:
            "general"       — Standard tagging (default)
            "healthcare"    — HIPAA/GDPR compliance tags
            "robotics"      — ISO 13482/IEC 61508 safety tags
            "legal"         — GDPR/CCPA/EULA compliance tags
            "education"     — FERPA/COPPA compliance tags
            "finance"       — SOX/FINRA/PCI-DSS compliance tags
            "ai_agent"      — Multi-agent orchestration tags

    Exits the process with status 1 on any network or API error,
    matching the script's print-and-exit error style.
    """
    try:
        response = requests.post(
            f"{IDEAPHASE_URL}/api/v1/encode",
            headers={
                "Authorization": f"Bearer {SWP_API_KEY}",
                "Content-Type": "application/json"
            },
            json={
                "text": text,
                "framework": framework,
                "include_extended_tags": True
            },
            # Without a timeout, requests can block forever on a hung server.
            timeout=30
        )
    except requests.RequestException as e:
        # Previously a connection failure raised a raw traceback; keep the
        # script's established "print error, exit 1" behavior instead.
        print(f"SWP API Error: {e}")
        sys.exit(1)

    if response.status_code != 200:
        # Error bodies are not guaranteed to be JSON (proxies often return
        # HTML) — fall back to raw text so a JSON decode exception does not
        # mask the real API error.
        try:
            detail = response.json()
        except ValueError:
            detail = response.text
        print(f"SWP API Error ({response.status_code}): {detail}")
        sys.exit(1)

    return response.json()


# ============================================================
# STEP 2a: Send to Ollama (local models)
# ============================================================

def send_to_ollama(prompt, model=None):
    """
    Send the SWP prompt template to Ollama for processing.

    Ollama runs models like Llama 3, Mistral, Phi, etc. locally.
    Install: https://ollama.ai
    Pull a model: ollama pull llama3

    Returns the generated text, or None on any failure. Callers
    (e.g. example_general) rely on None to print their
    "make sure Ollama is running" hint.
    """
    model = model or OLLAMA_MODEL

    try:
        response = requests.post(
            f"{OLLAMA_URL}/api/generate",
            json={
                "model": model,
                "prompt": prompt,
                "stream": False
            },
            # Generation can be slow on CPU; bound it so we never hang forever.
            timeout=300
        )
    except requests.RequestException as e:
        # Bug fix: a connection error previously propagated and crashed the
        # script before callers could reach their unreachable-server branch.
        print(f"Ollama Error: {e}")
        return None

    if response.status_code != 200:
        print(f"Ollama Error ({response.status_code}): {response.text}")
        return None

    return response.json().get("response", "")


# ============================================================
# STEP 2b: Send to LM Studio (local models)
# ============================================================

def send_to_lm_studio(prompt, model=None):
    """
    Send the SWP prompt template to LM Studio for processing.

    LM Studio provides an OpenAI-compatible API at localhost:1234.
    Download: https://lmstudio.ai
    Load any GGUF model and start the local server.

    Returns the assistant message text, or None on any failure.
    Callers (e.g. example_healthcare) rely on None to print their
    "make sure the server is running" hint.
    """
    model = model or LM_STUDIO_MODEL

    try:
        response = requests.post(
            f"{LM_STUDIO_URL}/v1/chat/completions",
            headers={"Content-Type": "application/json"},
            json={
                "model": model,
                "messages": [
                    {
                        "role": "system",
                        "content": "You are an AI assistant processing SWP-encoded documents. Follow all SWP tag instructions precisely."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                "temperature": 0.3,
                "max_tokens": 2000
            },
            # Bound the request so a stuck server cannot hang the script.
            timeout=300
        )
    except requests.RequestException as e:
        # Bug fix: a connection error previously crashed the script instead
        # of returning None like the HTTP-error path does.
        print(f"LM Studio Error: {e}")
        return None

    if response.status_code != 200:
        print(f"LM Studio Error ({response.status_code}): {response.text}")
        return None

    # Guard against 200 responses that lack the expected OpenAI shape.
    try:
        return response.json()["choices"][0]["message"]["content"]
    except (ValueError, KeyError, IndexError) as e:
        print(f"LM Studio Error: unexpected response format ({e})")
        return None


# ============================================================
# STEP 2c: Send to any OpenAI-compatible API
# ============================================================

def send_to_openai_compatible(prompt, base_url, api_key="not-needed", model="default"):
    """
    Works with any OpenAI-compatible API endpoint.
    This covers: LM Studio, Ollama (with OpenAI mode), vLLM,
    text-generation-webui, LocalAI, and others.

    Returns the assistant message text, or None on any failure
    (connection error, non-200 status, or malformed response) —
    consistent with send_to_ollama / send_to_lm_studio.
    """
    try:
        response = requests.post(
            f"{base_url}/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}"
            },
            json={
                "model": model,
                "messages": [
                    {
                        "role": "system",
                        "content": "You are an AI assistant processing SWP-encoded documents. Follow all SWP tag instructions precisely."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                "temperature": 0.3,
                "max_tokens": 2000
            },
            # Bound the request so a stuck server cannot hang the script.
            timeout=300
        )
    except requests.RequestException as e:
        # Bug fix: a connection error previously crashed the script instead
        # of returning None like the HTTP-error path does.
        print(f"API Error: {e}")
        return None

    if response.status_code != 200:
        print(f"API Error ({response.status_code}): {response.text}")
        return None

    # Guard against 200 responses that lack the expected OpenAI shape.
    try:
        return response.json()["choices"][0]["message"]["content"]
    except (ValueError, KeyError, IndexError) as e:
        print(f"API Error: unexpected response format ({e})")
        return None


# ============================================================
# COMPLETE WORKFLOW EXAMPLES
# ============================================================

def example_general():
    """Basic example: encode text and send to Ollama."""

    rule = "=" * 60
    print(rule)
    print("EXAMPLE: General SWP Encoding + Ollama")
    print(rule)

    my_text = """
    Project Alpha is a cloud-based inventory management system
    designed for mid-size retailers. The system needs to handle
    real-time stock tracking, automated reordering, and multi-
    location warehouse management. Initial deployment targets
    Q3 2026 with 50 pilot stores.
    """

    print("\n[1] Sending text to SWP Encoding API...")
    encoded = encode_with_swp(my_text, framework="general")

    # Surface the headline tags from the encoding result.
    core = encoded['tagged_document']['core_tags']
    print(f"    Phase: {core['phase']}")
    print(f"    Weight: {core['weight']}")
    print(f"    Tags applied: {encoded['metadata']['tag_count']}")
    print(f"    Proof hash: {encoded.get('proof', {}).get('proof_hash', 'N/A')[:16]}...")

    print(f"\n[2] Sending SWP prompt to Ollama ({OLLAMA_MODEL})...")
    reply = send_to_ollama(encoded["prompt_template"])

    if not reply:
        print("\n[!] Could not reach Ollama. Make sure it's running.")
        print("    Start it with: ollama serve")
        print("    Then pull a model: ollama pull llama3")
    else:
        print(f"\n[3] LLM Response (SWP-guided):")
        print("-" * 40)
        # Show at most the first 500 characters of the model output.
        print(reply[:500])
        if len(reply) > 500:
            print(f"... ({len(reply)} chars total)")


def example_healthcare():
    """Healthcare example with HIPAA compliance tags."""

    rule = "=" * 60
    print("\n" + rule)
    print("EXAMPLE: Healthcare Framework + LM Studio")
    print(rule)

    clinical_text = """
    Patient intake process redesign for Memorial Hospital's
    emergency department. The new system must capture demographics,
    insurance verification, triage assessment, and consent forms
    digitally. All data must comply with HIPAA requirements and
    integrate with the existing Epic EHR system.
    """

    print("\n[1] Encoding with Healthcare framework...")
    encoded = encode_with_swp(clinical_text, framework="healthcare")

    core = encoded['tagged_document']['core_tags']
    print(f"    Phase: {core['phase']}")
    print(f"    Weight: {core['weight']}")
    print(f"    Tags applied: {encoded['metadata']['tag_count']}")

    # Domain tags are only present for non-general frameworks.
    domain_tags = encoded['tagged_document'].get('domain_tags', {})
    if domain_tags:
        print(f"    Domain tags: {json.dumps(domain_tags, indent=6)}")

    print(f"\n[2] Sending to LM Studio...")
    reply = send_to_lm_studio(encoded["prompt_template"])

    if not reply:
        print("\n[!] Could not reach LM Studio. Make sure the server is running.")
        print("    Open LM Studio > Load a model > Start server (port 1234)")
    else:
        print(f"\n[3] LLM Response (Healthcare-compliant):")
        print("-" * 40)
        print(reply[:500])


def example_finance():
    """Finance example showing regulatory compliance."""

    rule = "=" * 60
    print("\n" + rule)
    print("EXAMPLE: Financial Services Framework")
    print(rule)

    finance_text = """
    Quarterly risk assessment for the derivatives trading desk.
    Review current VaR calculations, stress test scenarios, and
    counterparty exposure limits. Ensure all positions comply
    with Basel III capital requirements and FINRA margin rules.
    """

    print("\n[1] Encoding with Finance framework...")
    encoded = encode_with_swp(finance_text, framework="finance")

    meta = encoded['metadata']
    print(f"    Tags applied: {meta['tag_count']}")
    print(f"    Framework: {meta['framework']}")

    print("\n[2] Tagged document preview:")
    print("-" * 40)
    # Only show the first 10 lines of the tagged output.
    for preview_line in encoded["tagged_document"]["full_text"].split("\n")[:10]:
        print(f"    {preview_line}")

    print(f"\n[3] Prompt template ready — {len(encoded['prompt_template'])} chars")
    print("    Feed this to any LLM for compliance-aware analysis.")


def example_proof_verification():
    """Show how to verify a document's proof chain."""

    rule = "=" * 60
    print("\n" + rule)
    print("EXAMPLE: Proof Chain Verification")
    print(rule)

    my_text = "Contract amendment for vendor agreement #4521."

    print("\n[1] Encoding document...")
    encoded = encode_with_swp(my_text)

    proof = encoded.get("proof", {})
    if not proof:
        print("    No proof data returned (check API key permissions)")
        return

    print(f"    Document ID: {proof['document_id']}")
    print(f"    Proof Hash: {proof['proof_hash'][:32]}...")
    print(f"    Chain Position: {proof['chain_position']}")

    print(f"\n[2] Verifying proof (public endpoint, no auth needed)...")
    # The verify endpoint requires no Authorization header.
    verify = requests.get(f"{IDEAPHASE_URL}/api/v1/proof/verify/{proof['proof_hash']}")

    if verify.status_code == 200:
        payload = verify.json()
        print(f"    Verified: {payload.get('verified', False)}")
        print(f"    Timestamp: {payload.get('timestamp', 'N/A')}")
    else:
        print(f"    Verification returned: {verify.status_code}")


# ============================================================
# CURL QUICK REFERENCE
# ============================================================

def print_curl_examples():
    """Print cURL commands for quick testing.

    Covers key validation, document encoding, and proof verification,
    with platform-specific variants for Linux/Mac, Windows CMD, and
    PowerShell. Only IDEAPHASE_URL is interpolated; the API key stays
    a placeholder for the user to fill in.
    """

    print("\n" + "=" * 60)
    print("CURL QUICK REFERENCE")
    print("=" * 60)

    # NOTE: inside this f-string, doubled braces {{ }} render as literal
    # { } in the printed JSON payloads.
    print(f"""
=== TEST YOUR KEY FIRST (all platforms) ===

# Linux/Mac:
curl {IDEAPHASE_URL}/api/v1/key/test -H "Authorization: Bearer YOUR_SWP_API_KEY"

# Windows CMD (one line):
curl {IDEAPHASE_URL}/api/v1/key/test -H "Authorization: Bearer YOUR_SWP_API_KEY"

# Windows PowerShell (one line):
curl.exe {IDEAPHASE_URL}/api/v1/key/test -H "Authorization: Bearer YOUR_SWP_API_KEY"

# Expected: {{"valid": true, "key_prefix": "swp_...", "message": "API key is valid and active"}}


IMPORTANT: Always paste commands as a SINGLE LINE. If your terminal wraps the text visually, that's fine -- just don't insert any line breaks when pasting.


=== Linux / Mac / Git Bash (single line) ===

curl -X POST {IDEAPHASE_URL}/api/v1/encode -H "Authorization: Bearer YOUR_SWP_API_KEY" -H "Content-Type: application/json" -d '{{"text": "Your document text here", "framework": "general"}}'


=== Windows CMD (single line) ===

curl -X POST {IDEAPHASE_URL}/api/v1/encode -H "Authorization: Bearer YOUR_SWP_API_KEY" -H "Content-Type: application/json" -d "{{\\"text\\": \\"Your document text here\\", \\"framework\\": \\"general\\"}}"


=== Windows PowerShell (single line, use curl.exe with --% stop-parsing) ===
# The --% token tells PowerShell to pass everything after it literally to curl.exe

curl.exe --% -X POST {IDEAPHASE_URL}/api/v1/encode -H "Authorization: Bearer YOUR_SWP_API_KEY" -H "Content-Type: application/json" -d "{{\\"text\\": \\"Your document text here\\", \\"framework\\": \\"general\\"}}"


=== Verify a proof (no auth needed, same on all platforms) ===

curl {IDEAPHASE_URL}/api/v1/proof/verify/YOUR_PROOF_HASH
""")


# ============================================================
# OFFLINE DEMO (no API key needed)
# ============================================================

def example_finance_offline():
    """Show the workflow structure without making API calls.

    Prints an ASCII diagram of the encode -> LLM pipeline, then a
    hard-coded sample of the /api/v1/encode response structure.
    Used by the __main__ guard when SWP_API_KEY is still the placeholder.

    NOTE(review): despite the name, this demo is framework-agnostic
    (the sample uses "framework": "general") — confirm before renaming.
    """
    print("=" * 60)
    print("WORKFLOW STRUCTURE (no live API calls)")
    print("=" * 60)

    print("""
    The integration flow works like this:

    ┌─────────────┐     ┌──────────────┐     ┌──────────────┐
    │  Your Text   │ ──→ │  IdeaPhase   │ ──→ │  Your Local  │
    │  (raw idea)  │     │  SWP API     │     │  LLM         │
    └─────────────┘     └──────────────┘     └──────────────┘

    Step 1: You send raw text to IdeaPhase
            POST /api/v1/encode
            Authorization: Bearer swp_your_key

    Step 2: IdeaPhase returns:
            - tagged_document: Your text with SWP tags prepended
            - prompt_template: Ready-to-use prompt with instructions
            - lora_guidance: Fine-tuning recommendations
            - proof: Cryptographic proof hash + chain data

    Step 3: You take the prompt_template and send it to your LLM:
            - Ollama: POST http://localhost:11434/api/generate
            - LM Studio: POST http://localhost:1234/v1/chat/completions
            - Any OpenAI-compatible endpoint

    The LLM never sees your API key. The SWP tags in the prompt
    guide the model to produce structured, deterministic output.
    """)

    print("Sample API response structure:")
    print("-" * 40)
    # Static sample mirroring the live /api/v1/encode response shape.
    sample = {
        "swp_version": "1.0",
        "encoding_id": "a1b2c3d4e5f67890",
        "tagged_document": {
            "full_text": "@phase: Foundation\n@weight: 6\n@symbolic_id: 3\n@swp_sum: 42\n@priority: High\n\nYour document text...",
            "core_tags": {
                "phase": "Foundation",
                "weight": 6,
                "symbolic_id": 3,
                "swp_sum": 42
            },
            "extended_tags": {
                "priority": "High",
                "category": "Technical",
                "risk_level": "Moderate"
            },
            "domain_tags": None
        },
        "prompt_template": "[Full prompt with instructions + tagged document -- ready to paste into your LLM]",
        "lora_guidance": {
            "recommended_rank": 8,
            "recommended_alpha": 16,
            "tag_dimensions": [
                {"tag": "@phase", "dimension": "lifecycle_position", "training_weight": 1.0},
                {"tag": "@weight", "dimension": "complexity_score", "training_weight": 0.9},
                {"tag": "@symbolic_id", "dimension": "content_category", "training_weight": 0.7},
                {"tag": "@priority", "dimension": "urgency_signal", "training_weight": 0.8},
                {"tag": "@sector", "dimension": "domain_context", "training_weight": 0.85}
            ],
            "training_hints": [
                "Focus attention heads on @phase transitions to learn document lifecycle patterns",
                "Use @weight values as a proxy for expected response length and detail",
                "Domain tags (@sector, @regulatory_standards) should activate compliance-aware reasoning paths"
            ],
            "dataset_preparation": {
                "format": "instruction-response pairs with SWP tags prepended to instruction",
                "recommended_epochs": 3,
                "learning_rate": "2e-5 for full fine-tune, 1e-4 for LoRA"
            }
        },
        "proof": {
            "document_id": "uuid-here",
            "proof_hash": "sha256-hash",
            "chain_position": 1,
            "previous_proof_hash": None,
            "timestamp": "2026-02-16T12:00:00+00:00",
            "receipt_url": "/api/v1/proof/uuid-here"
        },
        "metadata": {
            "tag_count": 7,
            "framework": "general",
            "processing_time_ms": 12.5,
            "input_length": 150,
            "word_count": 25,
            "input_hash": "sha256-of-input",
            "encoded_at": "2026-02-16T12:00:00+00:00"
        }
    }
    print(json.dumps(sample, indent=2))


# ============================================================
# MAIN — Run the examples
# ============================================================

if __name__ == "__main__":
    # Banner.
    for header_line in ("", "  SWP Integration Script",
                        "  Symbol Word Protocol + Local LLM",
                        "  ─────────────────────────────────", ""):
        print(header_line)

    # Placeholder key => show setup instructions and the offline demo only.
    if SWP_API_KEY == "swp_your_api_key_here":
        print("  ⚠  Before running, edit this file and set:")
        print('     IDEAPHASE_URL = "https://your-published-url.replit.app"')
        print('     SWP_API_KEY   = "swp_your_actual_key"')
        print("")
        print("  Get your API key:")
        print("    Founder: Use SWP_FOUNDER_API_KEY from your secrets")
        print("    Pro user: Create a key at the IdeaPhase dashboard")
        print("")
        print_curl_examples()
        print("  Showing API structure without live calls...\n")
        example_finance_offline()
        sys.exit(0)

    print("  Running live examples...\n")

    # Live examples, in order.
    for demo in (example_general, example_healthcare, example_finance,
                 example_proof_verification, print_curl_examples):
        demo()

    closing_rule = "=" * 60
    print("\n" + closing_rule)
    print("Done! Your SWP integration is working.")
    print(closing_rule)
