# -*- coding: utf-8 -*-
"""
Assistant Gateway (FastAPI) — Moodle + RAG + OpenAI
Versão atualizada com correção definitiva do path INDEX_DIR

Funcionalidades:
- Endpoints Moodle (/courses, /course-contents, etc.)
- RAG com FAISS + SentenceTransformers (/ask)
- LLM integration (OpenAI compatible)
- Debug endpoints para diagnóstico
- Path absoluto fixo para resolver problemas de working directory
"""

from __future__ import annotations
import os
import json
import time
import re
import logging
from functools import lru_cache
from typing import Any, Dict, List, Optional

import requests
from fastapi import FastAPI, HTTPException, Query
from pydantic import BaseModel, validator
from fastapi import FastAPI, HTTPException, Query, Response
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI(title="Assistant Gateway", version="1.0.0")

# Origins allowed to call this API from a browser.
ALLOWED_ORIGINS = ["https://lms.ed-consulting.ao"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,   # ["*"] for testing only
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# =========================
# Logging configuration
# =========================
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# =========================
# Configuration / Environment
# =========================
MOODLE_URL      = os.getenv("MOODLE_URL", "https://lms.ed-consulting.ao")  # Moodle base URL
MOODLE_TOKEN    = os.getenv("MOODLE_TOKEN", "")                            # webservice token (required)
REQUEST_TIMEOUT = int(os.getenv("REQUEST_TIMEOUT", "25"))                  # seconds, for Moodle and LLM HTTP calls

# OpenAI-compatible LLM backend (vLLM, Fireworks etc). All optional: when
# unset, /ask returns a simulated answer instead of calling a model.
MODEL_API_URL   = os.getenv("MODEL_API_URL", "").strip()
MODEL_API_KEY   = os.getenv("MODEL_API_KEY", "").strip()
MODEL_NAME      = os.getenv("MODEL_NAME", os.getenv("OPENAI_MODEL", "gpt-4o-mini")).strip()
MODEL_PROVIDER  = os.getenv("MODEL_PROVIDER", "openai").strip().lower()    # currently informational only

# Fixed absolute path so index resolution is independent of the working directory.
INDEX_DIR = "/var/www/html/assistant/indexes"

# Startup diagnostics: verify the index directory before serving requests.
def _print_index_dir_diagnostics(sample_courseid: int = 3) -> None:
    """Print diagnostics about INDEX_DIR and one sample course's index files.

    Checks existence/size/permissions of ``course_<id>.faiss`` and
    ``course_<id>.meta.json`` — the exact filenames the Retriever loads.

    BUGFIX: the original checked ``course_3_meta.json`` (underscore), but the
    Retriever reads ``course_3.meta.json`` (dot), so the diagnostic always
    reported the metadata file as missing.
    """
    print(f"=== GATEWAY CONFIGURATION ===")
    print(f"INDEX_DIR: {INDEX_DIR}")
    print(f"INDEX_DIR exists: {os.path.exists(INDEX_DIR)}")
    print(f"Current working directory: {os.getcwd()}")

    if os.path.exists(INDEX_DIR):
        try:
            files = os.listdir(INDEX_DIR)
            print(f"Files in INDEX_DIR: {files}")

            faiss_path = os.path.join(INDEX_DIR, f"course_{sample_courseid}.faiss")
            meta_path = os.path.join(INDEX_DIR, f"course_{sample_courseid}.meta.json")
            print(f"course_{sample_courseid}.faiss exists: {os.path.exists(faiss_path)}")
            print(f"course_{sample_courseid}.meta.json exists: {os.path.exists(meta_path)}")

            if os.path.exists(meta_path):
                stat_info = os.stat(meta_path)
                print(f"course_{sample_courseid}.meta.json size: {stat_info.st_size} bytes")
                print(f"course_{sample_courseid}.meta.json permissions: {oct(stat_info.st_mode)}")

        except Exception as e:
            print(f"Error listing INDEX_DIR: {e}")
    else:
        print(f"INDEX_DIR does not exist: {INDEX_DIR}")

    print(f"=== END CONFIGURATION ===")


# Run once at import time, matching the original module-level behavior.
_print_index_dir_diagnostics()

# =========================
# App metadata
# =========================
# BUGFIX: the original re-created the FastAPI instance here, which silently
# rebound `app` and discarded the CORSMiddleware registered on the instance
# created at the top of the module (browser calls then failed CORS).
# Update the existing app's metadata in place instead.
app.title = "Assistant Gateway"
app.version = "1.2.0"
app.description = "Moodle + RAG + LLM Gateway - Fixed Paths Version"

# =========================
# Session for requests (connection pooling)
# =========================
# A shared Session reuses TCP connections to Moodle/the LLM backend and
# retries transient transport failures up to 3 times.
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
    pool_connections=10,
    pool_maxsize=20,
    max_retries=3
)
session.mount('http://', adapter)
session.mount('https://', adapter)

# =========================
# SentenceTransformer cache
# =========================
@lru_cache(maxsize=1)
def get_sentence_transformer():
    """Return the shared SentenceTransformer instance, loading it on first use.

    ``lru_cache(maxsize=1)`` makes this a process-wide singleton so the heavy
    model load happens only once. Import and load failures are logged and
    re-raised for the caller to handle.
    """
    try:
        from sentence_transformers import SentenceTransformer
    except ImportError as e:
        logger.error(f"Failed to import SentenceTransformer: {e}")
        raise

    logger.info("Loading SentenceTransformer model...")
    try:
        model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
    except Exception as e:
        logger.error(f"Failed to load SentenceTransformer model: {e}")
        raise

    logger.info("SentenceTransformer model loaded successfully")
    return model

# =========================
# Retriever cache
# =========================
retrievers_cache: Dict[int, 'Retriever'] = {}

def get_retriever(courseid: int) -> 'Retriever':
    """Return a cached Retriever for *courseid*, rebuilding it if it failed.

    BUGFIX: previously a Retriever that failed to initialize (e.g. index not
    yet built) stayed in the cache forever, so /ask kept failing even after
    the index was created. A failed instance is now replaced on the next call;
    successfully initialized instances remain cached.
    """
    cached = retrievers_cache.get(courseid)
    if cached is not None and cached.ok():
        return cached
    logger.info(f"Creating new retriever for course {courseid}")
    retriever = Retriever(courseid)
    retrievers_cache[courseid] = retriever
    return retriever

# =========================
# Utilities
# =========================
def call_moodle(wsfunction: str, params: Dict[str, Any]) -> Any:
    """Invoke a Moodle REST webservice function and return the parsed JSON.

    Raises HTTPException: 500 when the token is missing, 400 for Moodle-side
    errors, 504 on timeout and 502 on other transport failures.
    """
    if not MOODLE_TOKEN:
        logger.error("MOODLE_TOKEN not configured")
        raise HTTPException(500, "MOODLE_TOKEN não configurado")

    endpoint = f"{MOODLE_URL}/webservice/rest/server.php"
    query = {
        "wstoken": MOODLE_TOKEN,
        "moodlewsrestformat": "json",
        "wsfunction": wsfunction,
    }

    try:
        resp = session.post(endpoint, params=query, data=params, timeout=REQUEST_TIMEOUT)
        resp.raise_for_status()
        payload = resp.json()
    except requests.Timeout:
        logger.error("Timeout connecting to Moodle")
        raise HTTPException(504, "Timeout ao contactar Moodle")
    except requests.RequestException as e:
        logger.error(f"Moodle request failed: {e}")
        raise HTTPException(502, f"Falha ao contactar Moodle: {e}")

    # Moodle reports application errors inside a 200 response body.
    if isinstance(payload, dict) and payload.get("exception"):
        logger.error(f"Moodle API error: {payload}")
        raise HTTPException(400, f"Erro Moodle: {payload.get('message', 'Erro desconhecido')}")

    return payload

# =========================
# Debug & Health
# =========================
@app.get("/health")
def health():
    """Liveness probe: reports version and whether the index dir is present."""
    index_dir_present = os.path.exists(INDEX_DIR)
    return {
        "status": "ok",
        "timestamp": int(time.time()),
        "version": "1.2.0",
        "index_dir": INDEX_DIR,
        "index_dir_exists": index_dir_present,
    }

@app.get("/debug-config")
def debug_config():
    """Debug configuration (safe version — secrets are reported as booleans)."""
    # Idiom fix: plain import instead of the original __import__('sys') hack;
    # sys.version is already a str, so the f-string wrapper was redundant too.
    import sys

    return {
        "moodle_url": MOODLE_URL,
        "has_moodle_token": bool(MOODLE_TOKEN),
        "index_dir": INDEX_DIR,
        "index_dir_exists": os.path.exists(INDEX_DIR),
        "has_model_config": bool(MODEL_API_URL and MODEL_API_KEY),
        "model_name": MODEL_NAME,
        "python_cwd": os.getcwd(),
        "python_version": sys.version,
    }

@app.options("/ask")
def options_ask():
    """Explicit CORS preflight handler for /ask; returns an empty 204."""
    return Response(status_code=204)

@app.get("/cors-test")
def cors_test():
    """Tiny endpoint used to verify CORS headers reach the browser."""
    now = int(time.time())
    return {
        "message": "CORS está funcionando!",
        "timestamp": now,
        "origin_allowed": True,
    }

@app.get("/debug-routes")
def debug_routes():
    """Return every registered route with its path and sorted HTTP methods."""
    listing = []
    for r in app.router.routes:
        try:
            methods = sorted(getattr(r, 'methods', []))
            listing.append({"path": r.path, "methods": methods})
        except Exception:
            # Some route types (e.g. mounts) may lack these attributes; skip them.
            pass
    return {"routes": listing}

def _collect_file_diagnostics(info: Dict[str, Any], path_key: str, file_path: str) -> None:
    """Add size/permission/readability details for *file_path* under *path_key* keys.

    For the metadata file, also attempts a direct read and JSON parse, storing
    a preview and validity information (read failures go to ``meta_read_error``).
    """
    if not os.path.exists(file_path):
        return
    try:
        stat_info = os.stat(file_path)
        info[f"{path_key}_size"] = stat_info.st_size
        info[f"{path_key}_permissions"] = oct(stat_info.st_mode)[-3:]
        info[f"{path_key}_readable"] = os.access(file_path, os.R_OK)
        info[f"{path_key}_modified"] = stat_info.st_mtime

        if path_key == "meta":
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                info["meta_content_length"] = len(content)
                info["meta_preview"] = content[:200] + "..." if len(content) > 200 else content

                meta_data = json.loads(content)
                info["meta_json_valid"] = True
                info["meta_type"] = type(meta_data).__name__
                if isinstance(meta_data, (list, dict)):
                    info["meta_items"] = len(meta_data)
            except Exception as read_error:
                info["meta_read_error"] = str(read_error)

    except Exception as e:
        info[f"{path_key}_error"] = str(e)


def _dependency_status() -> Dict[str, Any]:
    """Report import status (and version, where available) of the RAG dependencies."""
    deps: Dict[str, Any] = {}
    try:
        import faiss
        deps["faiss"] = {"status": "OK", "version": getattr(faiss, "__version__", "unknown")}
    except ImportError as e:
        deps["faiss"] = {"status": "ERROR", "error": str(e)}

    try:
        from sentence_transformers import SentenceTransformer
        deps["sentence_transformers"] = {"status": "OK"}
    except ImportError as e:
        deps["sentence_transformers"] = {"status": "ERROR", "error": str(e)}

    return deps


@app.get("/debug-rag/{courseid}")
def debug_rag(courseid: int):
    """Full RAG diagnostic for a course: files, dependencies and a live search test."""
    index_path = os.path.join(INDEX_DIR, f"course_{courseid}.faiss")
    meta_path = os.path.join(INDEX_DIR, f"course_{courseid}.meta.json")

    info: Dict[str, Any] = {
        "courseid": courseid,
        "index_dir": INDEX_DIR,
        "index_path": index_path,
        "meta_path": meta_path,
        "index_exists": os.path.exists(index_path),
        "meta_exists": os.path.exists(meta_path),
        "cwd": os.getcwd(),
    }

    # Detailed per-file checks (size, permissions, readability, meta JSON parse).
    for path_key, file_path in [("index", index_path), ("meta", meta_path)]:
        _collect_file_diagnostics(info, path_key, file_path)

    info["dependencies"] = _dependency_status()

    # End-to-end retriever test: initialization plus a one-result search.
    try:
        retriever = get_retriever(courseid)
        info["retriever_ok"] = retriever.ok()

        if retriever.ok():
            test_results = retriever.search("test query", k=1)
            info["test_search_count"] = len(test_results)
            if test_results:
                info["test_search_sample"] = test_results[0]
            info["retriever_status"] = "SUCCESS"
        else:
            info["retriever_error"] = retriever.last_error()
            info["retriever_status"] = "FAILED"

    except Exception as e:
        info["retriever_exception"] = str(e)
        info["retriever_status"] = "EXCEPTION"

    return info

# =========================
# Moodle Endpoints
# =========================
@app.get("/site-info")
def site_info():
    """Get Moodle site information (core_webservice_get_site_info)."""
    return call_moodle("core_webservice_get_site_info", {})

@app.get("/courses")
def courses():
    """Get all courses visible to the configured webservice token."""
    return call_moodle("core_course_get_courses", {})

@app.get("/courses_slim")
def courses_slim():
    """Get courses reduced to the essential fields (id, shortname, fullname)."""
    slim = []
    for course in call_moodle("core_course_get_courses", {}):
        slim.append({
            "id": course.get("id"),
            "shortname": course.get("shortname"),
            "fullname": course.get("fullname"),
        })
    return slim

@app.get("/user-courses")
def user_courses(userid: int):
    """Get the courses a specific user is enrolled in."""
    return call_moodle("core_enrol_get_users_courses", {"userid": userid})

@app.get("/course-contents")
def course_contents(
    courseid: int,
    includecontents: Optional[str] = Query(None, description="true/false"),
    excludemodules: Optional[str] = Query(None, description="true/false"),
):
    """Get course contents, optionally forwarding Moodle webservice options.

    Options are encoded as ``options[i][name]`` / ``options[i][value]`` pairs
    as required by ``core_course_get_contents``. Enumerating only the supplied
    options keeps the indices dense and replaces the fragile manual counter of
    the original (which required remembering to bump ``idx`` per option).
    """
    opts: Dict[str, Any] = {"courseid": courseid}
    supplied = [
        ("includecontents", includecontents),
        ("excludemodules", excludemodules),
    ]
    for idx, (name, value) in enumerate((n, v) for n, v in supplied if v is not None):
        opts[f"options[{idx}][name]"] = name
        opts[f"options[{idx}][value]"] = value

    return call_moodle("core_course_get_contents", opts)

@app.get("/assignments")
def assignments(courseids: str = Query(..., description="Course IDs separated by comma")):
    """Get assignments for the given comma/whitespace-separated course ids."""
    ids = [int(token) for token in re.split(r"[,\s]+", courseids.strip()) if token]
    params = {f"courseids[{i}]": cid for i, cid in enumerate(ids)}
    return call_moodle("mod_assign_get_assignments", params)

@app.get("/calendar/events")
def calendar_events(courseids: str = Query(..., description="Course IDs separated by comma")):
    """Get course-type calendar events for the given course ids."""
    ids = [int(token) for token in re.split(r"[,\s]+", courseids.strip()) if token]
    params: Dict[str, Any] = {}
    for position, cid in enumerate(ids):
        params.update({
            f"events[events][{position}][eventtype]": "course",
            f"events[events][{position}][courseids][0]": cid,
        })
    return call_moodle("core_calendar_get_calendar_events", params)

# =========================
# RAG: Retriever Class - FIXED VERSION
# =========================
class Retriever:
    """
    FAISS-backed passage retriever for a single course.

    Loads ``course_<id>.faiss`` and ``course_<id>.meta.json`` from the fixed
    absolute ``INDEX_DIR``. Initialization never raises: failures are captured
    and exposed through ``ok()`` / ``last_error()`` so callers can degrade
    gracefully instead of crashing the request.
    """
    
    def __init__(self, courseid: int):
        self.courseid = int(courseid)
        # Use the fixed absolute path so lookup is independent of the CWD.
        self.index_path = os.path.join(INDEX_DIR, f"course_{self.courseid}.faiss")
        self.meta_path = os.path.join(INDEX_DIR, f"course_{self.courseid}.meta.json")
        self._init_ok = False   # becomes True only when _initialize() completes
        self._init_err = None   # last initialization exception (or None)
        
        logger.info(f"*** INITIALIZING RETRIEVER FOR COURSE {courseid} ***")
        logger.info(f"Index path: {self.index_path}")
        logger.info(f"Meta path: {self.meta_path}")
        
        self._initialize()
    
    def _initialize(self) -> None:
        """Load the FAISS index, embedding model and metadata, with detailed logging.

        Any failure is swallowed here: it sets ``_init_ok`` to False and stores
        the exception in ``_init_err`` for later inspection via ``last_error()``.
        """
        try:
            # Import dependencies (faiss may be absent in some deployments).
            import faiss
            logger.info("FAISS imported successfully")
            
            # Initial file-existence checks.
            logger.info(f"Checking if index exists: {self.index_path}")
            index_exists = os.path.exists(self.index_path)
            logger.info(f"Index exists: {index_exists}")
            
            logger.info(f"Checking if meta exists: {self.meta_path}")
            meta_exists = os.path.exists(self.meta_path)
            logger.info(f"Meta exists: {meta_exists}")
            
            if not index_exists:
                raise FileNotFoundError(f"FAISS index file not found: {self.index_path}")
            if not meta_exists:
                raise FileNotFoundError(f"Metadata file not found: {self.meta_path}")
            
            # Check read permissions explicitly so the failure message is precise.
            if not os.access(self.index_path, os.R_OK):
                raise PermissionError(f"Cannot read index file: {self.index_path}")
            if not os.access(self.meta_path, os.R_OK):
                raise PermissionError(f"Cannot read metadata file: {self.meta_path}")
            
            logger.info("File permissions OK")
            
            # Load the FAISS index.
            logger.info("Loading FAISS index...")
            self.faiss = faiss
            self.index = faiss.read_index(self.index_path)
            logger.info(f"FAISS index loaded successfully: {self.index.ntotal} vectors")
            
            # Get the process-wide cached SentenceTransformer.
            logger.info("Getting SentenceTransformer model...")
            self.embed = get_sentence_transformer()
            logger.info("SentenceTransformer ready")
            
            # Load chunk metadata (list or dict; normalized in _process_metadata).
            logger.info("Loading metadata file...")
            with open(self.meta_path, "r", encoding="utf-8") as f:
                meta = json.load(f)
            
            logger.info(f"Metadata loaded: type={type(meta)}")
            
            # Normalize metadata into both list and dict views.
            self._process_metadata(meta)
            
            self._init_ok = True
            logger.info(f"*** RETRIEVER INITIALIZATION COMPLETED SUCCESSFULLY FOR COURSE {self.courseid} ***")
            
        except Exception as e:
            self._init_ok = False
            self._init_err = e
            logger.error(f"*** RETRIEVER INITIALIZATION FAILED FOR COURSE {self.courseid}: {e} ***")
            import traceback
            logger.error(f"Full traceback:\n{traceback.format_exc()}")
    
    def _process_metadata(self, meta) -> None:
        """Normalize metadata into ``meta_list`` (by position) and ``meta_dict`` (by str key).

        Accepts either a JSON list (position == FAISS id) or a dict keyed by
        stringified indices; any other shape yields empty containers.
        """
        if isinstance(meta, list):
            self.meta_list = meta
            self.meta_dict = {str(i): item for i, item in enumerate(meta)}
            logger.info(f"Metadata processed: list format with {len(meta)} items")
        elif isinstance(meta, dict):
            self.meta_dict = meta
            try:
                # Rebuild a dense list when keys are numeric strings; gaps
                # become empty dicts.
                max_key = max(int(k) for k in meta.keys() if k.isdigit())
                self.meta_list = [meta.get(str(i), {}) for i in range(max_key + 1)]
                logger.info(f"Metadata processed: dict format with {len(meta)} keys, max_index={max_key}")
            except (ValueError, TypeError):
                # No numeric keys at all (max() over empty) — fall back to value order.
                self.meta_list = list(meta.values())
                logger.info(f"Metadata processed: dict format with {len(meta)} keys, non-numeric indices")
        else:
            self.meta_list = []
            self.meta_dict = {}
            logger.warning(f"Unknown metadata format: {type(meta)}")
    
    def ok(self) -> bool:
        """Return True if the retriever initialized successfully."""
        return self._init_ok
    
    def last_error(self) -> str:
        """Return the last initialization error as text ("No error" when none)."""
        return str(self._init_err) if self._init_err else "No error"
    
    def search(self, question: str, k: int = 6) -> List[Dict[str, Any]]:
        """Return up to *k* passages most similar to *question*.

        Each result merges the chunk's metadata with ``rank``, ``faiss_idx``,
        ``distance`` and a derived ``similarity`` score.

        Raises RuntimeError when called on a retriever that failed to initialize.
        """
        if not self._init_ok:
            raise RuntimeError(f"Retriever not initialized: {self.last_error()}")
        
        logger.info(f"Searching for: '{question[:50]}...' (k={k})")
        
        # Generate query embedding and run FAISS search (D=distances, I=ids).
        qv = self.embed.encode([question])
        D, I = self.index.search(qv, k)
        
        results = []
        for rank, idx in enumerate(I[0]):
            if idx == -1:  # FAISS returns -1 for not found
                continue
                
            # Resolve the metadata item by position first, then by string key.
            item = None
            if idx < len(self.meta_list):
                item = self.meta_list[idx]
            elif str(idx) in self.meta_dict:
                item = self.meta_dict[str(idx)]
            
            if not item:
                item = {"idx": int(idx), "text": f"Chunk {idx} (no metadata)"}
            
            # Copy so the cached metadata is never mutated in place.
            result = dict(item)
            result.update({
                "rank": rank,
                "faiss_idx": int(idx),
                "distance": float(D[0][rank]),
                # NOTE(review): this assumes distances fall roughly in [0, 1]
                # (e.g. normalized embeddings) — verify against the ingest side.
                "similarity": max(0, 1.0 - float(D[0][rank]))  # Convert distance to similarity
            })
            
            results.append(result)
        
        logger.info(f"Search completed: found {len(results)} results")
        return results

# =========================
# LLM Client
# =========================
def call_llm_openai(prompt: str) -> str:
    """Call an OpenAI-compatible chat-completions endpoint and return the answer text.

    Returns a canned message when the LLM is not configured, and raises
    HTTPException(502) on transport failures.
    """
    if not (MODEL_API_URL and MODEL_API_KEY and MODEL_NAME):
        logger.warning("LLM not configured, returning simulated response")
        return "Resposta simulada (LLM não configurado). Configure MODEL_API_URL, MODEL_API_KEY e MODEL_NAME para usar um modelo real."

    headers = {
        "Authorization": f"Bearer {MODEL_API_KEY}",
        "Content-Type": "application/json",
    }

    payload = {
        "model": MODEL_NAME,
        "messages": [
            {
                "role": "system",
                "content": "Você é um assistente educacional. Responda com base nos trechos fornecidos de forma clara e objetiva."
            },
            {
                "role": "user",
                "content": prompt
            },
        ],
        "temperature": 0.2,
        "max_tokens": 1000,
    }

    try:
        logger.info(f"Calling LLM API: {MODEL_API_URL}")
        r = session.post(MODEL_API_URL, headers=headers, json=payload, timeout=REQUEST_TIMEOUT)
        r.raise_for_status()
        data = r.json()
    except requests.RequestException as e:
        logger.error(f"LLM API call failed: {e}")
        raise HTTPException(502, f"Falha ao contactar modelo: {e}")

    # BUGFIX: the original used `data.get("choices", [{}])[0]`, which raises
    # IndexError when the API returns `"choices": []` — surfacing as an
    # unhandled 500 instead of the graceful empty-answer path below.
    choices = data.get("choices") or [{}]
    answer = choices[0].get("message", {}).get("content", "").strip()

    if not answer:
        logger.warning("Empty response from LLM")
        return "Resposta vazia do modelo."

    logger.info(f"LLM response received: {len(answer)} chars")
    return answer

# =========================
# RAG Endpoint Models
# =========================
class AskRequest(BaseModel):
    """Request body for /ask: a question scoped to a single course."""
    question: str
    courseid: int
    top_k: Optional[int] = 6

    @validator('question')
    def validate_question(cls, v):
        """Reject empty/whitespace-only or over-long questions; return it stripped."""
        stripped = (v or "").strip()
        if not stripped:
            raise ValueError('Pergunta não pode estar vazia')
        if len(v) > 2000:
            raise ValueError('Pergunta muito longa (máximo 2000 caracteres)')
        return stripped

    @validator('courseid')
    def validate_courseid(cls, v):
        """Require a strictly positive course id."""
        if v < 1:
            raise ValueError('Course ID deve ser positivo')
        return v

    @validator('top_k')
    def validate_top_k(cls, v):
        """Allow None (use default) or a value in the inclusive range [1, 20]."""
        if v is None:
            return v
        if not 1 <= v <= 20:
            raise ValueError('top_k deve estar entre 1 e 20')
        return v

class AskResponse(BaseModel):
    """Response body for /ask."""
    question: str                    # the validated (stripped) question asked
    courseid: int                    # course the answer is scoped to
    passages: List[Dict[str, Any]]   # retrieved passages with rank/distance fields
    answer: str                      # LLM-generated (or simulated) answer text
    metadata: Dict[str, Any]         # timing / status / diagnostics

# =========================
# RAG Endpoint
# =========================
@app.post("/ask", response_model=AskResponse)
def ask(request: AskRequest):
    """
    RAG endpoint: retrieve relevant course passages and answer via the LLM.

    Flow: cached retriever -> FAISS search -> context assembly -> LLM call.
    Returns early (without calling the LLM) when nothing relevant is found.
    """
    start_time = time.time()
    logger.info(f"=== PROCESSING ASK REQUEST ===")
    logger.info(f"Question: {request.question[:100]}...")
    logger.info(f"Course ID: {request.courseid}")
    logger.info(f"Top K: {request.top_k}")

    try:
        # Get retriever (cached per course).
        logger.info("Getting retriever...")
        retriever = get_retriever(request.courseid)

        if not retriever.ok():
            error_msg = (
                f"RAG não disponível para o curso {request.courseid}. "
                f"Erro: {retriever.last_error()}. "
                f"Para gerar o índice, execute: python3 ingest.py {request.courseid}"
            )
            logger.error(error_msg)
            raise HTTPException(500, error_msg)

        logger.info("Retriever OK, performing search...")

        # Clamp top_k to [1, 20] regardless of what validation allowed through.
        k = min(max(1, request.top_k or 6), 20)
        passages = retriever.search(request.question, k=k)

        logger.info(f"Search completed: {len(passages)} passages found")

        if not passages:
            logger.warning("No relevant passages found")
            return AskResponse(
                question=request.question,
                courseid=request.courseid,
                passages=[],
                answer="Não foram encontrados conteúdos relevantes para esta pergunta no curso.",
                metadata={"processing_time": time.time() - start_time, "status": "no_content"}
            )

        # Build the context shown to the LLM from whichever text field each
        # passage carries ("text", "content" or "snippet").
        logger.info("Building context for LLM...")
        context_parts = []
        for i, passage in enumerate(passages[:k], 1):
            text = passage.get("text") or passage.get("content") or passage.get("snippet") or ""
            source = passage.get("source") or passage.get("file") or passage.get("url") or f"Documento {i}"

            if text.strip():
                context_parts.append(f"[CONTEÚDO {i}]\n{text.strip()}\n(Fonte: {source})\n")

        if not context_parts:
            return AskResponse(
                question=request.question,
                courseid=request.courseid,
                passages=passages,
                answer="Os conteúdos encontrados não possuem texto relevante para responder à pergunta.",
                metadata={"processing_time": time.time() - start_time, "status": "no_text"}
            )

        context = "\n".join(context_parts)

        # Generate prompt
        prompt = f"""Pergunta: {request.question}

Com base nos seguintes conteúdos do curso, forneça uma resposta educativa e completa:

{context}

Instruções para a resposta:
- Use apenas as informações fornecidas nos conteúdos acima
- Seja claro, educativo e bem estruturado
- Cite as fontes quando relevante (Fonte: ...)
- Se a informação não estiver completa nos conteúdos, mencione isso
- Organize a resposta de forma didática"""

        logger.info("Generating LLM response...")
        answer = call_llm_openai(prompt)

        processing_time = time.time() - start_time
        logger.info(f"=== REQUEST COMPLETED IN {processing_time:.2f}s ===")

        # BUGFIX: MODEL_NAME defaults to "gpt-4o-mini", so the old expression
        # `MODEL_NAME if MODEL_NAME else "simulated"` never reported
        # "simulated" even when no LLM was configured and the answer actually
        # came from the simulated path. Mirror call_llm_openai's gating.
        llm_configured = bool(MODEL_API_URL and MODEL_API_KEY and MODEL_NAME)
        return AskResponse(
            question=request.question,
            courseid=request.courseid,
            passages=passages,
            answer=answer,
            metadata={
                "processing_time": processing_time,
                "passages_found": len(passages),
                "context_length": len(context),
                "model_used": MODEL_NAME if llm_configured else "simulated",
                "status": "success"
            }
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Unexpected error in /ask: {e}")
        import traceback
        logger.error(f"Full traceback:\n{traceback.format_exc()}")
        raise HTTPException(500, f"Erro interno: {e}")

# =========================
# Additional Utility Endpoints
# =========================
@app.get("/courses/{courseid}/rag-status")
def rag_status(courseid: int):
    """Report whether the RAG index for *courseid* exists and loads correctly."""
    index_path = os.path.join(INDEX_DIR, f"course_{courseid}.faiss")
    meta_path = os.path.join(INDEX_DIR, f"course_{courseid}.meta.json")

    index_present = os.path.exists(index_path)
    meta_present = os.path.exists(meta_path)

    status: Dict[str, Any] = {
        "courseid": courseid,
        "rag_available": False,
        "index_exists": index_present,
        "meta_exists": meta_present,
        "last_modified": None,
        "index_dir": INDEX_DIR,
    }

    # Without both files there is nothing further to check.
    if not (index_present and meta_present):
        return status

    try:
        retriever = get_retriever(courseid)
        status["rag_available"] = retriever.ok()
        status["last_modified"] = max(
            os.path.getmtime(index_path),
            os.path.getmtime(meta_path),
        )
        if not status["rag_available"]:
            status["error"] = retriever.last_error()
    except Exception as e:
        status["error"] = str(e)

    return status

# =========================
# Startup Event
# =========================
@app.on_event("startup")
async def startup_event():
    """Log the effective configuration and the course indexes found on disk."""
    logger.info("=== ASSISTANT GATEWAY STARTING ===")
    logger.info(f"Version: 1.2.0")
    logger.info(f"INDEX_DIR: {INDEX_DIR}")
    logger.info(f"INDEX_DIR exists: {os.path.exists(INDEX_DIR)}")
    logger.info(f"MOODLE_URL: {MOODLE_URL}")
    logger.info(f"Has MOODLE_TOKEN: {bool(MOODLE_TOKEN)}")
    logger.info(f"Has LLM config: {bool(MODEL_API_URL and MODEL_API_KEY)}")
    logger.info(f"Working Directory: {os.getcwd()}")

    # List available course indexes.
    if os.path.exists(INDEX_DIR):
        try:
            files = os.listdir(INDEX_DIR)
            # BUGFIX: metadata files are named "course_<id>.meta.json" (see the
            # Retriever's meta_path), not "course_<id>_meta.json" — the old
            # "_meta.json" suffix filter never matched any metadata file.
            course_files = [
                f for f in files
                if f.startswith("course_") and (f.endswith(".faiss") or f.endswith(".meta.json"))
            ]
            logger.info(f"Available course indexes: {course_files}")
        except Exception as e:
            logger.warning(f"Could not list INDEX_DIR contents: {e}")

    logger.info("=== STARTUP COMPLETE ===")

if __name__ == "__main__":
    # Local development entry point (localhost only); in production run via a
    # proper ASGI server deployment.
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)
