"""Chunk augmenters for retrieval, including a Gemini-backed implementation
that can use Gemini context caching for large documents."""

import os
from abc import ABC, abstractmethod
from datetime import timedelta

from vechord.log import logger

class BaseAugmenter(ABC):
    """Interface for augmenters that enrich chunks with document-level context."""

    @abstractmethod
    def reset(self, doc: str):
        """Cache the document for augmentation."""
        raise NotImplementedError

    @abstractmethod
    def name(self) -> str:
        """Return a unique name identifying this augmenter."""
        raise NotImplementedError

    @abstractmethod
    def augment_context(self, chunks: list[str]) -> list[str]:
        """Generate a situating context for each chunk."""
        raise NotImplementedError

    @abstractmethod
    def augment_query(self, chunks: list[str]) -> list[str]:
        """Generate retrieval questions for each chunk."""
        raise NotImplementedError

    @abstractmethod
    def summarize_doc(self) -> str:
        """Summarize the cached document."""
        raise NotImplementedError

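
# --- Illustration (not part of the original module) ---
# A hypothetical minimal subclass sketching the BaseAugmenter contract without
# any LLM calls; "EchoAugmenter" and its trivial behaviors are assumptions for
# demonstration only.
class EchoAugmenter(BaseAugmenter):
    def reset(self, doc: str):
        self.doc = doc  # keep the raw document; no remote cache involved

    def name(self) -> str:
        return "echo_augment"

    def augment_context(self, chunks: list[str]) -> list[str]:
        # prepend a fixed marker instead of a generated situating context
        return [f"[{self.name()}] {chunk}" for chunk in chunks]

    def augment_query(self, chunks: list[str]) -> list[str]:
        return [f"What does this chunk discuss? {chunk}" for chunk in chunks]

    def summarize_doc(self) -> str:
        return self.doc[:200]  # naive truncation in place of a real summary
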
class GeminiAugmenter(BaseAugmenter):
    def __init__(self, model: str = "models/gemini-1.5-flash-001", ttl_sec: int = 600):
        """Gemini augmenter with optional context caching.

        Gemini context caching requires a minimum of 32,768 tokens.
        """
        key = os.environ.get("GEMINI_API_KEY")
        if not key:
            raise ValueError("env GEMINI_API_KEY not set")

        self.api_key = key
        self.model_name = model
        self.ttl_sec = ttl_sec
        self.min_token = 32768
    def name(self) -> str:
        return f"gemini_augment_{self.model_name}"

    def reset(self, doc: str):
        import google.generativeai as genai

        genai.configure(api_key=self.api_key)
        self.client = genai.GenerativeModel(model_name=self.model_name)
        tokens = self.client.count_tokens(doc).total_tokens
        self.doc = ""  # empty means the doc lives in the Gemini cache
        if tokens < self.min_token:
            # the doc is below the Gemini cache minimum, so keep it locally
            self.doc = doc
        else:
            logger.debug("use cache since the doc has %d tokens", tokens)
            cache = genai.caching.CachedContent.create(
                model=self.model_name,
                system_instruction=(
                    "You are an expert in natural language understanding. "
                    "Answer the questions based on the whole document you "
                    "have access to."
                ),
                contents=doc,
                ttl=timedelta(seconds=self.ttl_sec),
            )
            self.client = genai.GenerativeModel.from_cached_content(
                cached_content=cache
            )

    def augment(self, chunks: list[str], prompt: str) -> list[str]:
        res = []
        try:
            for chunk in chunks:
                context = prompt.format(chunk=chunk)
                if self.doc:
                    # doc was too small to cache, so inline it in the prompt
                    context = f"<document>{self.doc}</document>\n" + context
                response = self.client.generate_content([context])
                res.append(response.text)
        except Exception as e:
            logger.error("GeminiAugmenter error: %s", e)
        return res

    def augment_context(self, chunks: list[str]) -> list[str]:
        prompt = (
            "Here is the chunk we want to situate within the whole document: "
            "<chunk>{chunk}</chunk>\n"
            "Please give a short succinct context to situate this chunk within "
            "the overall document for the purposes of improving search retrieval "
            "of the chunk. Answer only with the succinct context and nothing else."
        )
        return self.augment(chunks, prompt)

    def augment_query(self, chunks: list[str]) -> list[str]:
        prompt = (
            "Here is the chunk we want to ask questions about: "
            "<chunk>{chunk}</chunk>\n"
            "Please ask questions about this chunk based on the overall document "
            "for the purposes of improving search retrieval of the chunk. "
            "Answer only with the questions and nothing else."
        )
        return self.augment(chunks, prompt)

    def summarize_doc(self) -> str:
        prompt = (
            "Summarize the provided document concisely while preserving its key "
            "ideas, main arguments, and essential details. Ensure clarity and "
            "coherence, avoiding unnecessary repetition."
        )
        if self.doc:
            prompt = f"<document>{self.doc}</document>\n" + prompt
        response = self.client.generate_content([prompt])
        return response.text
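
# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a valid GEMINI_API_KEY in the environment and network access to the
# Gemini API; the document and chunk strings below are placeholders.
if __name__ == "__main__":
    augmenter = GeminiAugmenter(ttl_sec=300)
    augmenter.reset("full document text ...")
    # situate each chunk within the document for better retrieval
    for ctx in augmenter.augment_context(["first chunk", "second chunk"]):
        print(ctx)
    print(augmenter.summarize_doc())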