from fastapi import UploadFile, File, Form, APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm,HTTPBearer, HTTPAuthorizationCredentials
from fastapi.responses import JSONResponse
from dotenv import load_dotenv
import requests
import os
import json
import uuid
import docx2pdf
from openai import OpenAI
from dependencies.HybridSearch import HybridSearch
from dependencies.awsutils import S3Service
from dependencies.helper import *
from celery_worker import process_document_celery
from celery.result import AsyncResult
import db_config.database as dbase
from db_config.models import User, Folder, Documents, ChatHistory
import db_config.schemas as schemas
import db_config.auth as auth


# Router and bearer-token auth scheme shared by every endpoint in this module.
router = APIRouter()
security = HTTPBearer()
load_dotenv()

# Vector-store collection name (passed to HybridSearch / the Celery worker).
collection_name = f"{os.getenv('COLLECTION_NAME')}"
# collection_name = f"{os.getenv('COLLECTION_NAME')}_{uuid.uuid4().hex}"
DB_FOLDER = str(os.getenv('DB_PATH'))+"/"  # local DB/index directory root
UPLOAD_FOLDER = str(os.getenv('STORAGE_PATH'))+"/"  # root directory for uploaded files
openai_client=OpenAI()  # NOTE(review): not used anywhere in this module's visible code — confirm before removing

@router.post("/add")
async def upload_pdf(
    doc_select: UploadFile = File(None),
    folder_id: int = Form(...),
    doc_name: str = Form(...),
    doc_url: str = Form(None),
    metadata: str = Form(None),
    credentials: HTTPAuthorizationCredentials = Depends(security)
):
    """Upload a document (multipart file or remote URL), persist it locally and
    on S3, record it in the database, and queue it for async processing.

    Exactly one of ``doc_select`` / ``doc_url`` must be provided. ``metadata``
    is an optional JSON-encoded string attached to the document record.

    Returns a success response containing the new ``doc_id`` and Celery
    ``task_id``; raises HTTP 500 (after marking the row "Failed") if storage
    or queueing fails.
    """
    db = dbase.SessionLocal()
    # Defined up-front so the exception handler can safely test it; the
    # original referenced it unbound when an error occurred early.
    new_document = None
    try:
        token = credentials.credentials
        user = auth.get_current_user(db, token)

        if metadata:
            try:
                metadata = json.loads(metadata)  # Convert string to JSON
            except json.JSONDecodeError:
                return errorResponse(
                    "Invalid JSON format in metadata"
                )

        # Identity comparison (`is None`) instead of `== None`.
        if doc_select is None and not doc_url:
            return errorResponse(
                "Please provide either a document file or a document URL"
            )
        if doc_select and not doc_select.filename and not doc_url:
            return errorResponse("Please provide either a document file or a document URL.")

        folder = (
            db.query(Folder)
            .filter(Folder.user_id == user.user_id)
            .filter(Folder.folder_id == folder_id)
            .first()
        )
        if not folder:
            return errorResponse("Folder ID does not exist.")

        upload_dir = str(UPLOAD_FOLDER) + str(user.user_code) + "/" + str(folder.sanitized_name)
        os.makedirs(upload_dir, exist_ok=True)

        try:
            # Guard on doc_select itself: the original dereferenced
            # `doc_select.filename` even when only a URL was supplied,
            # which raised AttributeError on None.
            if doc_select and doc_select.filename:
                document_path = os.path.join(upload_dir, doc_select.filename)
                file_extension = os.path.splitext(doc_select.filename)[1]
                # Save the uploaded file locally.
                with open(document_path, "wb") as f:
                    f.write(await doc_select.read())
                # Rewind so S3Service can re-read the stream below.
                doc_select.file.seek(0)
                document_name = os.path.splitext(doc_select.filename)[0]
            else:
                document_name = os.path.basename(doc_url).split('.')[-2]
                file_extension = os.path.splitext(doc_url)[1]
                sanitized_name = replace_special_characters(document_name)
                document_path = os.path.join(upload_dir, f"{sanitized_name}{file_extension}")

                download_response = download_file(doc_url, document_path)
                if download_response.status_code in (400, 500):
                    parsed_body = json.loads(download_response.body)
                    return errorResponse(parsed_body.get("message"), download_response.status_code)

            sanitized_name = replace_special_characters(document_name)

            s3_service = S3Service()
            s3_service.upload_object(upload_dir, sanitized_name, doc_select if doc_select else None)

            file_key = f"{upload_dir}/{sanitized_name}{file_extension}"
            document_url = s3_service.get_object_url(file_key)

            # folder_id is a required form field and was already validated via
            # the folder lookup, so the row is created unconditionally (the
            # original's `if folder_id:` guard left new_document unbound on
            # the falsy path while later code used it anyway).
            new_document = Documents(
                doc_name=doc_name,
                doc_url=document_url,
                sanitized_name=sanitized_name,
                document_type=file_extension.lstrip("."),
                folder_id=folder_id,
                doc_status="Processing",
                doc_metadata=metadata if metadata else None
            )
            db.add(new_document)
            db.commit()
            db.refresh(new_document)

            task = process_document_celery.delay(
                document_path, document_name, sanitized_name, document_url,
                upload_dir, metadata, user.user_code, collection_name,
                folder.sanitized_name, new_document.doc_id
            )

            # Persist the Celery task id on the freshly created row (same
            # session identity — no need to re-query).
            new_document.task_id = task.id
            db.commit()
            db.refresh(new_document)

            document_data = {
                "doc_id": new_document.doc_id,
                "doc_name": doc_name,
                "doc_url": document_url,
                "task_id": task.id,
                "doc_status": new_document.doc_status
            }
            return successReponse(
                "Document uploaded and is in queue for processing.",
                document_data
            )

        except Exception as e:
            # Roll back the failed transaction BEFORE issuing the status
            # update (the original committed then rolled back, which is a
            # no-op and could leave the session in a bad state).
            db.rollback()
            if new_document is not None and new_document.doc_id:
                failed = db.query(Documents).filter(Documents.doc_id == new_document.doc_id).first()
                if failed:
                    failed.doc_status = "Failed"
                    db.commit()
            raise HTTPException(status_code=500, detail=f"Failed to save file: {str(e)}")
    finally:
        # The original leaked the session on every path.
        db.close()

@router.put("/rename/{doc_id}")
async def updateDocName(doc_id: int, data: schemas.Document, credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Rename a document owned by the authenticated user.

    Rejects empty names and names already used by another of the user's
    documents; returns 404 when the document does not exist (or is not
    owned by the caller).
    """
    db = dbase.SessionLocal()
    try:
        token = credentials.credentials
        user = auth.get_current_user(db, token)

        document_name = data.doc_name.strip() if data.doc_name else ''
        if not document_name:
            return errorResponse(
                "Document name is required."
            )

        # Duplicate check excludes the document being renamed, so re-saving
        # a document under its own current name is not a false conflict.
        existing_doc = (
            db.query(Documents)
            .join(Documents.folder)
            .filter(Documents.doc_name == document_name)
            .filter(Folder.user_id == user.user_id)
            .filter(Documents.doc_id != doc_id)
            .first()
        )
        if existing_doc:
            return errorResponse(
                "Cannot update document with duplicate names."
            )

        # Ownership enforced via the folder join — the original let any
        # authenticated user rename any document by id.
        doc = (
            db.query(Documents)
            .join(Documents.folder)
            .filter(Documents.doc_id == doc_id)
            .filter(Folder.user_id == user.user_id)
            .first()
        )
        if not doc:
            return errorResponse(
                "Document does not exist.",
                404
            )

        doc.doc_name = data.doc_name
        db.commit()
        db.refresh(doc)

        return successReponse(
            "Document successfully updated.",
            schemas.DocumentResponse.from_orm(doc).dict()
        )

    except HTTPException:
        # Preserve auth (401) and other deliberate HTTP errors.
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"Something went wrong: {str(e)}")
    finally:
        db.close()

@router.delete('/delete/{doc_id}')
async def deleteDocument(doc_id: int, credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Delete a document: its vector-store entries, the locally stored file,
    the S3 object, and finally the database row.

    Local-file and S3 cleanup are best-effort (logged, not fatal); the DB
    row is always removed when the document exists.
    """
    db = dbase.SessionLocal()
    try:
        token = credentials.credentials
        user = auth.get_current_user(db, token)

        # Ownership enforced via the folder join — the original deleted any
        # document by id regardless of who owned it.
        document = (
            db.query(Documents)
            .join(Documents.folder)
            .filter(Documents.doc_id == doc_id)
            .filter(Folder.user_id == user.user_id)
            .first()
        )
        if not document:
            return errorResponse(
                "Document does not exist.",
                404
            )
        folder = db.query(Folder).filter(Folder.folder_id == document.folder_id).first()

        if document.document_uuid:
            # Remove the document's vectors from the collection.
            # Consistency fix: HybridSearch takes user.user_code elsewhere in
            # this module, not the User ORM object.
            hs = HybridSearch(collection_name, user.user_code, folder.sanitized_name)
            hs.delete_collection_by_uuid(document.document_uuid)

            # Remove the locally stored copy (best-effort).
            UPLOAD_PATH = os.getenv("STORAGE_PATH") + "/" + str(user.user_code) + "/" + str(folder.sanitized_name)
            file_path = f"{UPLOAD_PATH}/{document.sanitized_name}.{document.document_type}"
            try:
                os.remove(file_path)
                print(f"File '{file_path}' has been deleted.")
            except FileNotFoundError:
                print(f"File '{file_path}' not found.")
            except PermissionError:
                print(f"Permission denied: Unable to delete '{file_path}'.")
            except Exception as e:
                print(f"An error occurred: {e}")

            # Remove the S3 copy.
            # NOTE(review): file_path is the local path; confirm S3Service.delete_file
            # expects this same key layout (upload uses f"{upload_dir}/{name}{ext}").
            s3_service = S3Service()
            s3_service.delete_file(file_path)

        db.delete(document)
        db.commit()
        return successReponse(
            "Document deleted successfully",
            None
        )

    except HTTPException:
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"Something went wrong: {str(e)}")
    finally:
        db.close()

# Get folders documents
@router.get("/list/{folder_id}", response_model=schemas.DocumentListResponse)
async def get_documents(folder_id: int, credentials: HTTPAuthorizationCredentials = Depends(security)):
    db = dbase.SessionLocal()
    token = credentials.credentials
    user = auth.get_current_user(db, token)
    try:
        documents = db.query(Documents).join(Documents.folder).filter(Folder.user_id == user.user_id).filter(Documents.folder_id == folder_id).all()
        if not documents and len(documents) == 0:
            return errorResponse(
                "No any document found."
            )  #
        else:
            document_data = [schemas.DocumentResponse.from_orm(document) for document in documents]
            return successReponse(
                "Documents retrieved successfully",
                [document.dict() for document in document_data]
            )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Something went wrong: {str(e)}")

@router.get("/task/{task_id}")
async def get_task_status(task_id: str):
    """Report the current Celery state, result, and traceback for *task_id*."""
    task_result = AsyncResult(task_id)
    payload = {
        "status": task_result.status,
        "result": str(task_result.result),  # stringified task result
        "traceback": task_result.traceback,  # detailed error logs, if any
    }
    return payload


def download_file(url: str, save_path: str, chunk_size: int = 1024):
    """Download a file from *url* and stream it to *save_path*.

    Args:
        url: The URL of the file to download (must be http/https).
        save_path: Local path where the file is written; parent directories
            are created as needed.
        chunk_size: Streaming chunk size in bytes. Defaults to 1024.

    Returns:
        A successReponse wrapping *save_path* on success, or an errorResponse
        (status 400 for request failures / invalid URL, 500 for write errors).
        Callers inspect ``.status_code`` — this function never raises to them.
        (The original docstring wrongly claimed a ``str`` return and
        ``ValueError``.)
    """
    try:
        # Validate the scheme before issuing any network request.
        if not url.startswith(("http://", "https://")):
            return errorResponse("Invalid URL. URL must start with 'http://' or 'https://'.")

        response = requests.get(url, stream=True, timeout=10)
        response.raise_for_status()  # raise for HTTP 4xx/5xx

        # Ensure the target directory exists.
        os.makedirs(os.path.dirname(save_path), exist_ok=True)

        # Stream to disk in chunks, skipping keep-alive chunks.
        with open(save_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=chunk_size):
                if chunk:
                    file.write(chunk)

        return successReponse('', save_path)

    except requests.exceptions.RequestException as e:
        return errorResponse(f"Failed to download file from URL: {str(e)}", 400)

    except Exception as e:
        return errorResponse(f"An error occurred while saving the file: {e}", 500)