# app/services/llm_service.py
"""LLM service for synthesizing answers from course content."""
import logging
from typing import Optional, List

from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage, BaseMessage
from pydantic import BaseModel

from app.config import settings

logger = logging.getLogger(__name__)


class SourceDocument(BaseModel):
    """Provenance record for one retrieved chunk used to build an answer.

    Returned to callers inside :class:`SynthesisResult` so answers can be
    cited back to the course material they came from.
    """
    content: str  # raw text of the retrieved chunk ('page_content' from RAG)
    filename: str  # source file name; "Unknown" when retrieval metadata is missing
    chunk_id: Optional[str] = None  # retriever-assigned chunk id, when available


class SynthesisResult(BaseModel):
    """Outcome of one LLM synthesis call: the answer plus the documents used."""
    answer: str  # text generated by the LLM
    sources: List[SourceDocument]  # every context document that was fed to the model


class LLMService:
    """Service for synthesizing answers using an OpenAI LLM with RAG context.

    Wraps a ``ChatOpenAI`` client and turns a user query plus retrieved
    context documents into a sourced answer (:class:`SynthesisResult`).
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> None:
        """Initialize LLM service.

        Each argument falls back to the corresponding ``settings`` value.
        The fallback is resolved here, at call time, rather than in the
        signature: evaluating ``settings.*`` in default values freezes the
        configuration at import time and ignores anything loaded or patched
        afterwards.

        Args:
            api_key: OpenAI API key (default: ``settings.OPENAI_API_KEY``).
            model: Model name, e.g. "gpt-4o-mini" (default: ``settings.OPENAI_MODEL``).
            temperature: Temperature for generation, 0-2 (default: ``settings.LLM_TEMPERATURE``).
            max_tokens: Maximum tokens in response (default: ``settings.LLM_MAX_TOKENS``).

        Raises:
            ValueError: If no API key is provided here or via settings.
        """
        # `is None` checks (not truthiness) so an explicit 0 / 0.0 survives.
        api_key = settings.OPENAI_API_KEY if api_key is None else api_key
        model = settings.OPENAI_MODEL if model is None else model
        temperature = settings.LLM_TEMPERATURE if temperature is None else temperature
        max_tokens = settings.LLM_MAX_TOKENS if max_tokens is None else max_tokens

        if not api_key:
            raise ValueError("OPENAI_API_KEY must be provided in environment or constructor")

        self.api_key = api_key
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens

        # One client per service instance; reused across synthesize() calls.
        self._llm = ChatOpenAI(
            api_key=api_key,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
        )

    def get_llm(self) -> ChatOpenAI:
        """Get the underlying ChatOpenAI instance."""
        return self._llm

    def _build_prompt(
        self,
        query: str,
        context_docs: List[dict],
        guidelines: Optional[str] = None,
    ) -> List[BaseMessage]:
        """Build the system + user message pair for one synthesis call.

        Args:
            query: User's question.
            context_docs: List of dicts with 'page_content' and 'metadata'.
            guidelines: Optional custom guidelines for the LLM; when given
                they replace the default system prompt.

        Returns:
            List of messages for the LLM (system message, then user message).
        """
        # Prefix each chunk with its source so the model can attribute facts.
        context_text = "\n\n".join(
            f"[Source: {doc.get('metadata', {}).get('source', 'Unknown')}]\n{doc['page_content']}"
            for doc in context_docs
        )

        default_guidelines = (
            "You are a helpful educational assistant. "
            "Use the provided course materials to answer the user's question. "
            "If the information is not in the provided materials, say so explicitly. "
            "Provide practical examples when relevant. "
            "Be concise but thorough."
        )

        # Custom guidelines belong in the system message only. They are
        # deliberately NOT repeated inside the user prompt: doing both sent
        # the same instructions to the model twice.
        system_message = SystemMessage(content=guidelines or default_guidelines)

        user_prompt = (
            "Based on the following course materials, please answer this question:\n\n"
            f"**Course Materials:**\n{context_text}\n\n"
            f"**Question:** {query}"
        )

        return [system_message, HumanMessage(content=user_prompt)]

    def synthesize(
        self,
        query: str,
        context_docs: List[dict],
        guidelines: Optional[str] = None,
    ) -> SynthesisResult:
        """Synthesize an answer using the LLM and context documents.

        Args:
            query: User's question.
            context_docs: List of dicts with 'page_content' and 'metadata' from RAG.
            guidelines: Optional custom guidelines for the LLM.

        Returns:
            SynthesisResult with the answer text and the source documents used.

        Raises:
            Exception: Propagated unchanged if the LLM call fails.
        """
        try:
            messages = self._build_prompt(query, context_docs, guidelines)
            response = self._llm.invoke(messages)

            # Chat responses expose .content; fall back to str() for safety.
            answer = response.content if hasattr(response, 'content') else str(response)

            # Echo back every context document as a citable source.
            sources = [
                SourceDocument(
                    content=doc['page_content'],
                    filename=doc.get('metadata', {}).get('source', 'Unknown'),
                    chunk_id=doc.get('metadata', {}).get('id', None),
                )
                for doc in context_docs
            ]

            # Lazy %-formatting: message is only built when INFO is enabled.
            logger.info("LLM synthesis successful for query: %.50s...", query)
            return SynthesisResult(answer=answer, sources=sources)

        except Exception:
            # logger.exception records the full traceback, not just the message.
            logger.exception("LLM synthesis failed")
            raise
