# app/ingest.py
"""Ingestion CLI for loading documents into RAG system."""
import glob
import os
import sys
from typing import Any, Dict, List, Optional

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_redis import RedisVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter

from .config import settings
from .main_helpers import get_index_name


def chunk_text(text: str, chunk_size: int, chunk_overlap: int) -> List[str]:
    """Break *text* into a list of chunks of at most ``chunk_size`` characters,
    with ``chunk_overlap`` characters shared between consecutive chunks.

    Delegates the actual splitting to RecursiveCharacterTextSplitter.
    """
    return RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    ).split_text(text)


def ingest_documents(course_id: Optional[str] = None) -> None:
    """Ingest all .txt files under ``settings.DATA_DIR`` into Redis for a course.

    Chunk IDs are stable (``"<filename>:<zero-padded chunk index>"``), so
    re-running ingestion for the same files updates existing vectors rather
    than creating duplicates.

    Args:
        course_id: Course to ingest into. Defaults to
            ``settings.DEFAULT_COURSE_ID`` when ``None``.
    """
    if course_id is None:
        course_id = settings.DEFAULT_COURSE_ID

    files = sorted(
        glob.glob(os.path.join(settings.DATA_DIR, "**", "*.txt"), recursive=True)
    )
    if not files:
        print("No documents found. Exiting.")
        return

    print(f"Found {len(files)} file(s) to ingest into course: {course_id}")

    embeddings = HuggingFaceEmbeddings(model_name=settings.MODEL_NAME)
    vectorstore = None
    index_name = get_index_name(course_id)

    total_chunks = 0
    for file_path in files:
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                text = f.read()
        except UnicodeDecodeError:
            # Best-effort ingestion: skip mis-encoded files instead of aborting.
            print(f"Skipping non-UTF8 file: {file_path}")
            continue

        filename = os.path.basename(file_path)
        file_id = filename
        # BUG FIX: was the hard-coded placeholder "./app/data/(unknown)",
        # which stamped every chunk with a bogus source. Record the real path.
        rel_source = file_path

        chunks = chunk_text(text, settings.CHUNK_SIZE, settings.CHUNK_OVERLAP)
        texts: List[str] = chunks
        metadatas: List[Dict[str, Any]] = []
        ids: List[str] = []
        for i, _chunk in enumerate(chunks):
            metadatas.append({
                "file_id": file_id,
                "source": rel_source,
                "chunk": i,
                "filename": filename,
                "course_id": course_id,
            })
            # Zero-padded index keeps IDs lexicographically sorted and stable.
            ids.append(f"{file_id}:{i:06d}")

        if not texts:
            # Empty file (or whitespace only) produced no chunks.
            continue

        if vectorstore is None:
            # First batch creates/attaches to the index
            vectorstore = RedisVectorStore.from_texts(
                texts=texts,
                metadatas=metadatas,
                embedding=embeddings,
                index_name=index_name,
                redis_url=settings.REDIS_URL,
                ids=ids,
            )
        else:
            vectorstore.add_texts(texts=texts, metadatas=metadatas, ids=ids)

        total_chunks += len(chunks)
        # BUG FIX: was "Ingested (unknown)" — a placeholder that never named
        # the file actually processed.
        print(f"Ingested {filename}: {len(chunks)} chunk(s).")

    print(f"Ingestion complete. Course: {course_id}, Total chunks: {total_chunks}")


if __name__ == "__main__":
    # Create a sample document for demonstration.
    # exist_ok avoids the racy check-then-create of os.path.exists + makedirs.
    os.makedirs(settings.DATA_DIR, exist_ok=True)
    sample_file = os.path.join(settings.DATA_DIR, "sample.txt")
    with open(sample_file, "w", encoding="utf-8") as f:
        f.write(
            "Redis is an in-memory data structure store, used as a database, "
            "cache, and message broker. "
        )
        f.write(
            "It supports data structures such as strings, hashes, lists, sets, "
            "and sorted sets with range queries. "
        )
        f.write(
            "Redis has built-in replication, Lua scripting, LRU eviction, "
            "transactions, and different levels of on-disk persistence."
        )

    # Optional: accept course_id as the first command-line argument.
    course_id = sys.argv[1] if len(sys.argv) > 1 else None
    ingest_documents(course_id)
