summaryrefslogtreecommitdiff
path: root/hag/generator.py
blob: d0de468509ffb19b7f34ab8f11a8530db6033db7 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
"""LLM generation wrapper for producing answers from retrieved context."""

import logging
from typing import List

import torch

from hag.config import GeneratorConfig

logger = logging.getLogger(__name__)

# QA prompt: retrieved passages are injected as numbered entries ([1], [2], ...)
# under "Context:"; the instruction asks for a terse answer with no explanation,
# which Generator.generate further trims to its first line/sentence.
PROMPT_TEMPLATE = """Answer the following question based on the provided context passages. Give ONLY the answer itself in a few words, with no explanation.

Context:
{context}

Question: {question}

Answer:"""


class Generator:
    """LLM-based answer generator.

    Uses a HuggingFace causal LM (e.g., Llama-3.1-8B-Instruct). The model
    and tokenizer are loaded lazily on the first call to generate().
    For testing, use FakeGenerator instead.
    """

    def __init__(self, config: GeneratorConfig, device: str = "cpu") -> None:
        """Store configuration; no weights are downloaded or loaded here.

        Args:
            config: generation settings (model name, max_new_tokens, temperature).
            device: torch device string, e.g. "cpu" or "cuda:0".
        """
        self.config = config
        self.device = torch.device(device)
        # Populated lazily by _load_model() on the first generate() call.
        self._tokenizer = None
        self._model = None

    def _load_model(self) -> None:
        """Lazy-load the model and tokenizer, placing the model on self.device."""
        from transformers import AutoModelForCausalLM, AutoTokenizer

        logger.info("Loading generator model: %s (device=%s)", self.config.model_name, self.device)
        self._tokenizer = AutoTokenizer.from_pretrained(self.config.model_name)
        self._model = AutoModelForCausalLM.from_pretrained(
            self.config.model_name,
            torch_dtype="auto",
            device_map=self.device,
        )
        self._model.eval()

    def generate(self, question: str, passages: List[str]) -> str:
        """Generate an answer given a question and retrieved passages.

        Args:
            question: the user question
            passages: list of retrieved passage texts

        Returns:
            Generated answer string, trimmed to the first line or sentence
            of the model output.
        """
        if self._model is None:
            self._load_model()

        # Number passages [1], [2], ... to match the prompt's citation style.
        context = "\n\n".join(
            f"[{i+1}] {p}" for i, p in enumerate(passages)
        )
        prompt = PROMPT_TEMPLATE.format(context=context, question=question)

        inputs = self._tokenizer(prompt, return_tensors="pt")
        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        # Only pass sampling parameters when actually sampling: supplying
        # temperature (even None) together with do_sample=False makes
        # transformers emit "generation flag not used" warnings.
        gen_kwargs = {
            "max_new_tokens": self.config.max_new_tokens,
            "repetition_penalty": 1.2,
        }
        if self.config.temperature > 0:
            gen_kwargs["do_sample"] = True
            gen_kwargs["temperature"] = self.config.temperature
        else:
            gen_kwargs["do_sample"] = False

        with torch.inference_mode():
            outputs = self._model.generate(**inputs, **gen_kwargs)

        # Decode only the generated tokens (skip the echoed prompt).
        generated = outputs[0][inputs["input_ids"].shape[1]:]
        answer = self._tokenizer.decode(generated, skip_special_tokens=True).strip()
        # Keep only the first line or sentence. The original list also
        # contained ".\n", but that entry was unreachable: any string
        # containing ".\n" also contains "\n", which matches first and
        # breaks. Dropping it does not change behavior.
        for sep in ("\n", ". "):
            if sep in answer:
                answer = answer.split(sep)[0].strip()
                break
        return answer


class FakeGenerator:
    """Test double for Generator.

    Returns a canned answer without downloading or running any model,
    making it suitable for fast unit tests.
    """

    def generate(self, question: str, passages: List[str]) -> str:
        """Produce a fixed placeholder answer.

        Args:
            question: the user question (ignored).
            passages: retrieved passage texts (ignored).

        Returns:
            The constant string "mock answer".
        """
        del question, passages  # intentionally unused
        return "mock answer"