Skip to content

Commit f98790f

Browse files
Jon Olick authored and committed
fixed chat window
1 parent 35ae02d commit f98790f

File tree

2 files changed

+108
-33
lines changed

2 files changed

+108
-33
lines changed

aisearch.py

Lines changed: 33 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -907,23 +907,45 @@ def chat_about_matches(matches: List[Dict[str, Any]],
907907
4. Clear, factual analysis based only on the provided code
908908
909909
When referring to matches, ALWAYS use the FULL file path and line number (e.g., '/path/to/file.cpp:123') rather than match numbers or just the filename.
910-
Keep your responses concise and to the point."""
910+
Keep your responses concise and to the point.
911+
912+
IMPORTANT: Always directly address the user's questions. If they ask a specific question about the code or results, make sure to answer that question directly rather than continuing your previous analysis.
913+
"""
911914

912915
print(f"\nEntering chat mode. {len(matches)} matches found for: '{original_prompt}'")
913916
print("Ask questions about the search results or type 'exit' to quit.\n")
914917

915-
history = [{
916-
"role": "user",
917-
"content": f"These are code search results for: '{original_prompt}'\n\n{combined_contexts}\n\nPlease analyze these findings."
918-
}]
918+
# Store the initial search results as a fixed context that will always be included
919+
search_context = f"These are code search results for: '{original_prompt}'\n\n{combined_contexts}"
920+
921+
# Keep track of user-assistant exchanges separately from the fixed context
922+
exchanges = []
919923

924+
# Initial analysis request
925+
initial_prompt = "Please analyze these findings."
926+
920927
while True:
921928
user_input = input("\nYou: ")
922929
if user_input.lower() in {"exit", "quit"}:
923930
print("Exiting chat.")
924931
break
925932

926-
history.append({"role": "user", "content": user_input})
933+
# Add user message to exchanges
934+
exchanges.append({"role": "user", "content": user_input})
935+
936+
# Limit exchanges to prevent context overflow
937+
recent_exchanges = exchanges[-12:] if len(exchanges) > 12 else exchanges
938+
939+
# Construct messages: first the context message, then recent exchanges
940+
messages = [{"role": "user", "content": search_context}]
941+
942+
# Add a separator if we have exchanges
943+
if recent_exchanges:
944+
messages.append({"role": "assistant", "content": "I'll analyze these code search results. What specific aspects would you like me to focus on?"})
945+
messages.extend(recent_exchanges)
946+
else:
947+
# If this is the first exchange, add the initial analysis request
948+
messages.append({"role": "user", "content": initial_prompt})
927949

928950
if provider == "anthropic":
929951
# Use streaming API for Anthropic
@@ -939,7 +961,7 @@ def chat_about_matches(matches: List[Dict[str, Any]],
939961
"type": "enabled",
940962
"budget_tokens": 2048
941963
},
942-
messages=history[-10:] # Send only the last 10 exchanges
964+
messages=messages
943965
) as stream:
944966
for text in stream.text_stream:
945967
print(text, end="", flush=True)
@@ -951,7 +973,7 @@ def chat_about_matches(matches: List[Dict[str, Any]],
951973

952974
stream = client.chat.completions.create(
953975
model="o3-mini",
954-
messages=[{"role": "system", "content": system_message}] + history[-10:],
976+
messages=[{"role": "system", "content": system_message}] + messages,
955977
temperature=1,
956978
max_completion_tokens=4096,
957979
stream=True
@@ -964,7 +986,9 @@ def chat_about_matches(matches: List[Dict[str, Any]],
964986
full_response += text
965987

966988
print() # Add a newline after the streamed response
967-
history.append({"role": "assistant", "content": full_response})
989+
990+
# Add assistant response to exchanges
991+
exchanges.append({"role": "assistant", "content": full_response})
968992

969993

970994
def clear_file_cache() -> None:

aisearch_gui.py

Lines changed: 75 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -151,13 +151,14 @@ def stop(self):
151151
self.stop_requested = True
152152

153153
class ChatThread(threading.Thread):
154-
def __init__(self, parent, matches, prompt, question):
154+
def __init__(self, parent, matches, prompt, question, chat_history=None):
155155
super().__init__()
156156
self.parent = parent
157157
self.matches = matches
158158
self.prompt = prompt
159159
self.question = question
160160
self.provider = parent.provider_combo.currentText()
161+
self.chat_history = chat_history or []
161162

162163
def run(self):
163164
try:
@@ -181,16 +182,35 @@ def run(self):
181182
4. Clear, factual analysis based only on the provided code
182183
183184
When referring to matches, ALWAYS use the file path and line number (e.g., 'In file.cpp:123') rather than match numbers.
184-
Keep your responses concise and to the point."""
185+
Keep your responses concise and to the point.
185186
186-
messages = [
187-
{"role": "user", "content": f"These are code search results for: '{self.prompt}'\n\n{combined_contexts}"}
188-
]
187+
IMPORTANT: Always directly address the user's questions. If they ask a specific question about the code or results,
188+
make sure to answer that question directly rather than continuing your previous analysis."""
189189

190-
if self.question:
191-
messages.append({"role": "user", "content": self.question})
190+
# Prepare messages
191+
if not self.chat_history:
192+
# First message in the conversation
193+
messages = [
194+
{"role": "user", "content": f"These are code search results for: '{self.prompt}'\n\n{combined_contexts}"}
195+
]
196+
197+
if self.question:
198+
messages.append({"role": "user", "content": self.question})
199+
else:
200+
messages.append({"role": "user", "content": "Please analyze these findings."})
192201
else:
193-
messages.append({"role": "user", "content": "Please analyze these findings."})
202+
# We have existing chat history
203+
# First message always contains the search context
204+
messages = [
205+
{"role": "user", "content": f"These are code search results for: '{self.prompt}'\n\n{combined_contexts}"}
206+
]
207+
# Add a separator message
208+
messages.append({"role": "assistant", "content": "I'll analyze these code search results. What would you like to know?"})
209+
# Add the conversation history (limited to last 10 exchanges to prevent context overflow)
210+
messages.extend(self.chat_history[-10:])
211+
# Add the new question
212+
if self.question:
213+
messages.append({"role": "user", "content": self.question})
194214

195215
if self.provider == "anthropic":
196216
response = client.messages.create(
@@ -210,7 +230,19 @@ def run(self):
210230
)
211231
response_text = response.choices[0].message.content
212232

213-
self.parent.signal_chat_response.emit(response_text)
233+
# Save question and response to history
234+
if self.question:
235+
new_history = self.chat_history + [
236+
{"role": "user", "content": self.question},
237+
{"role": "assistant", "content": response_text}
238+
]
239+
else:
240+
new_history = self.chat_history + [
241+
{"role": "assistant", "content": response_text}
242+
]
243+
244+
# Use tuple to send both response and history
245+
self.parent.signal_chat_response.emit((response_text, new_history))
214246

215247
except Exception as e:
216248
self.parent.signal_error.emit(str(e))
@@ -1099,13 +1131,14 @@ class AISearchGUI(QMainWindow):
10991131
signal_update_terms = Signal(str)
11001132
signal_search_complete = Signal(int)
11011133
signal_error = Signal(str)
1102-
signal_chat_response = Signal(str)
1134+
signal_chat_response = Signal(object) # Changed to object to support tuple
11031135

11041136
def __init__(self):
11051137
super().__init__()
11061138
self.matches = []
11071139
self.search_thread = None
11081140
self.chat_thread = None
1141+
self.chat_history = [] # Add chat history
11091142
self.settings = QSettings("AICodeSearch", "AISearchGUI")
11101143
self.results_buffer = ResultsBuffer()
11111144
self.update_timer = QTimer(self)
@@ -1917,6 +1950,7 @@ def clear_results(self):
19171950
self.chat_input.clear()
19181951
self.file_list.clear()
19191952
self.matches = []
1953+
self.chat_history = [] # Clear chat history
19201954
self.chat_button.setEnabled(False)
19211955
self.chat_action.setEnabled(False)
19221956
self.refine_button.setEnabled(False)
@@ -1939,16 +1973,37 @@ def start_chat(self):
19391973
# Switch to chat tab
19401974
self.tabs.setCurrentIndex(1)
19411975

1942-
# Start chat thread
1943-
self.chat_thread = ChatThread(self, self.matches, prompt, question)
1976+
# Start chat thread with history
1977+
self.chat_thread = ChatThread(self, self.matches, prompt, question, self.chat_history)
19441978
self.chat_thread.start()
19451979

1946-
@Slot(str)
1947-
def update_chat(self, response):
1980+
@Slot(object)
1981+
def update_chat(self, response_data):
19481982
"""Update chat output with AI response"""
1983+
# Unpack response and history
1984+
if isinstance(response_data, tuple):
1985+
response, self.chat_history = response_data
1986+
else:
1987+
# For backward compatibility
1988+
response = response_data
1989+
19491990
# Store original markdown for copy functionality
19501991
self.chat_output.markdown_content = response
19511992

1993+
# Clear previous content and show only the latest exchange
1994+
self.chat_output.clear()
1995+
1996+
# Add user question to the display if it exists
1997+
if self.chat_input.toPlainText().strip():
1998+
# Format with user question and AI response
1999+
content = f"**You:** {self.chat_input.toPlainText().strip()}\n\n**AI:**\n\n{response}"
2000+
self.chat_output.setMarkdown(content)
2001+
# Clear input after sending
2002+
self.chat_input.clear()
2003+
else:
2004+
# Initial analysis (no user question)
2005+
self.chat_output.setMarkdown(response)
2006+
19522007
# Convert markdown to HTML
19532008
try:
19542009
# Set up extensions based on available libraries
@@ -1972,7 +2027,7 @@ def update_chat(self, response):
19722027
self.chat_output.document().setDefaultStyleSheet(current_css + pygments_css)
19732028

19742029
html_content = markdown.markdown(
1975-
response,
2030+
self.chat_output.toMarkdown(),
19762031
extensions=extensions,
19772032
extension_configs=extension_configs
19782033
)
@@ -1986,15 +2041,11 @@ def update_chat(self, response):
19862041

19872042
# Set the HTML content
19882043
self.chat_output.setHtml(html_content)
1989-
except Exception as e:
1990-
# Fallback to plain text if markdown conversion fails
1991-
self.chat_output.setPlainText(response)
1992-
self.statusBar().showMessage(f"Markdown rendering error: {str(e)}")
1993-
1994-
self.progress_bar.setVisible(False)
1995-
self.chat_button.setEnabled(True)
1996-
# Remove chat_action reference
1997-
self.statusBar().showMessage("Chat response received")
2044+
2045+
# Hide progress and enable chat button
2046+
finally:
2047+
self.progress_bar.setVisible(False)
2048+
self.chat_button.setEnabled(True)
19982049

19992050
def loadSettings(self):
20002051
"""Load saved settings"""

0 commit comments

Comments (0)