@@ -1,6 +1,7 @@
 import os
 
 from dotenv import load_dotenv
+from google.api_core import exceptions as google_exceptions
 from google.genai import Client
 from langchain_core.messages import AIMessage
 from langchain_core.runnables import RunnableConfig
@@ -39,6 +40,16 @@
 genai_client = Client(api_key=os.getenv("GEMINI_API_KEY"))
 
 
+def _handle_model_not_found(e: Exception, model_name: str):
+    """Checks for a model not found error and raises a specific ValueError."""
+    if isinstance(e, google_exceptions.NotFound) or (
+        "404" in str(e) and "models/" in str(e)
+    ):
+        error_msg = f"Model '{model_name}' not found. Please try a different model."
+        raise ValueError(error_msg) from e
+    raise e
+
+
 # Nodes
 def generate_query(state: OverallState, config: RunnableConfig) -> QueryGenerationState:
     """LangGraph node that generates search queries based on the User's question.
@@ -83,15 +94,7 @@ def generate_query(state: OverallState, config: RunnableConfig) -> QueryGenerati
         result = structured_llm.invoke(formatted_prompt)
         return {"search_query": result.query}
     except Exception as e:
-        # Check if this is a model not found error
-        if "404" in str(e) and "models/" in str(e):
-            # Return an error that the frontend can catch
-            error_msg = (
-                f"Model '{model_to_use}' not found. Please try a different model."
-            )
-            raise ValueError(error_msg) from e
-        # Re-raise other errors
-        raise
+        _handle_model_not_found(e, model_to_use)
 
 
 def continue_to_web_research(state: QueryGenerationState):
@@ -154,15 +157,7 @@ def web_research(state: WebSearchState, config: RunnableConfig) -> OverallState:
             "web_research_result": [modified_text],
         }
     except Exception as e:
-        # Check if this is a model not found error
-        if "404" in str(e) and "models/" in str(e):
-            # Return an error that the frontend can catch
-            error_msg = (
-                f"Model '{model_to_use}' not found. Please try a different model."
-            )
-            raise ValueError(error_msg) from e
-        # Re-raise other errors
-        raise
+        _handle_model_not_found(e, model_to_use)
 
 
 def reflection(state: OverallState, config: RunnableConfig) -> ReflectionState:
@@ -209,15 +204,7 @@ def reflection(state: OverallState, config: RunnableConfig) -> ReflectionState:
             "number_of_ran_queries": len(state["search_query"]),
        }
     except Exception as e:
-        # Check if this is a model not found error
-        if "404" in str(e) and "models/" in str(e):
-            # Return an error that the frontend can catch
-            error_msg = (
-                f"Model '{reasoning_model}' not found. Please try a different model."
-            )
-            raise ValueError(error_msg) from e
-        # Re-raise other errors
-        raise
+        _handle_model_not_found(e, reasoning_model)
 
 
 def evaluate_research(
@@ -305,15 +292,7 @@ def finalize_answer(state: OverallState, config: RunnableConfig):
             "sources_gathered": unique_sources,
         }
     except Exception as e:
-        # Check if this is a model not found error
-        if "404" in str(e) and "models/" in str(e):
-            # Return an error that the frontend can catch
-            error_msg = (
-                f"Model '{reasoning_model}' not found. Please try a different model."
-            )
-            raise ValueError(error_msg) from e
-        # Re-raise other errors
-        raise
+        _handle_model_not_found(e, reasoning_model)
 
 
 # Create our Agent Graph