path: root/code_eval/OpenCodeEval/backend/base.py
from typing import Callable, Optional
from abc import ABC, abstractmethod

def make_chat_template(
        prompt: str,
        response_prefix: str = "",
        is_chat: bool = True,
        tokenizer: Optional[Callable] = None
    ) -> str:
    """Wrap a raw prompt in the tokenizer's chat template.

    When is_chat is True, the prompt becomes a single user turn followed by
    the generation prompt, with response_prefix appended so the model
    continues from it. When is_chat is False, the prompt is returned as-is.
    """
    if not is_chat:
        return prompt

    if tokenizer is None:
        raise ValueError("a tokenizer is required when is_chat is True")

    prompt = tokenizer.apply_chat_template(
        [
            {"role": "user", "content": prompt},
        ],
        tokenize=False,
        add_generation_prompt=True
    ) + response_prefix

    # Strip a leading BOS token so it is not duplicated when the backend
    # tokenizes the rendered prompt again.
    if tokenizer.bos_token and prompt.startswith(tokenizer.bos_token):
        prompt = prompt[len(tokenizer.bos_token):]

    return prompt
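
# Illustrative usage, not part of the original module: this assumes a Hugging
# Face tokenizer (transformers.AutoTokenizer) providing the
# apply_chat_template method used above; the model name is a placeholder.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("placeholder/chat-model")
#     prompt = make_chat_template(
#         "Write a function that reverses a string.",
#         response_prefix="```python\n",
#         tokenizer=tokenizer,
#     )
#     # prompt now ends with the assistant header plus the response prefix,
#     # so generation continues inside a Python code block.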

class Generator(ABC):
    """Abstract base class for generation backends."""

    model_name: Optional[str] = None

    def __init__(self, model_name: str) -> None:
        """
        :param model_name: str
            name or path of the model used to generate completions
        """
        self.model_name = model_name

    def fewshot_examples(self):
        """Loads and returns the few-shot examples for the task if they exist."""
        pass

    @abstractmethod
    def set_stop(self):
        """
        Set the stop tokens for the model
        """
        pass

    @abstractmethod
    def generate(self):
        """
        Generate completions from the model for samples of the test dataset
        """
        pass
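
# A minimal illustrative subclass, not from the original module, showing how
# a concrete backend could satisfy the Generator interface; the stop token
# and the echoed completion are placeholders rather than a real model call.
class EchoGenerator(Generator):

    def set_stop(self):
        """Use a generic end-of-sequence marker as the only stop token."""
        self.stop_words = ["</s>"]

    def generate(self):
        """Return a canned string in place of a real model completion."""
        return f"[{self.model_name}] example completion"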