fix: correct `utils` import path and add structured logging

This commit is contained in:
2026-02-24 17:23:24 +00:00
parent 6feeeff4f3
commit 753b5c7871

204
main.py
View File

@@ -15,7 +15,7 @@ from google import genai
from google.genai import types as genai_types from google.genai import types as genai_types
from mcp.server.fastmcp import Context, FastMCP from mcp.server.fastmcp import Context, FastMCP
from .utils import Settings, _args, log_structured_entry from utils import Settings, _args, log_structured_entry
HTTP_TOO_MANY_REQUESTS = 429 HTTP_TOO_MANY_REQUESTS = 429
HTTP_SERVER_ERROR = 500 HTTP_SERVER_ERROR = 500
@@ -70,10 +70,21 @@ class GoogleCloudFileStorage:
""" """
if file_name in self._cache: if file_name in self._cache:
log_structured_entry(
"File retrieved from cache",
"INFO",
{"file": file_name, "bucket": self.bucket_name}
)
file_stream = io.BytesIO(self._cache[file_name]) file_stream = io.BytesIO(self._cache[file_name])
file_stream.name = file_name file_stream.name = file_name
return file_stream return file_stream
log_structured_entry(
"Starting file download from GCS",
"INFO",
{"file": file_name, "bucket": self.bucket_name}
)
storage_client = self._get_aio_storage() storage_client = self._get_aio_storage()
last_exception: Exception | None = None last_exception: Exception | None = None
@@ -85,11 +96,22 @@ class GoogleCloudFileStorage:
) )
file_stream = io.BytesIO(self._cache[file_name]) file_stream = io.BytesIO(self._cache[file_name])
file_stream.name = file_name file_stream.name = file_name
log_structured_entry(
"File downloaded successfully",
"INFO",
{
"file": file_name,
"bucket": self.bucket_name,
"size_bytes": len(self._cache[file_name]),
"attempt": attempt + 1
}
)
except TimeoutError as exc: except TimeoutError as exc:
last_exception = exc last_exception = exc
log_structured_entry( log_structured_entry(
f"Timeout downloading gs://{self.bucket_name}/{file_name} (attempt {attempt + 1}/{max_retries})" f"Timeout downloading gs://{self.bucket_name}/{file_name} (attempt {attempt + 1}/{max_retries})",
"WARNING" "WARNING",
{"error": str(exc)}
) )
except aiohttp.ClientResponseError as exc: except aiohttp.ClientResponseError as exc:
last_exception = exc last_exception = exc
@@ -98,22 +120,43 @@ class GoogleCloudFileStorage:
or exc.status >= HTTP_SERVER_ERROR or exc.status >= HTTP_SERVER_ERROR
): ):
log_structured_entry( log_structured_entry(
f"HTTP {exc.status} downloading gs://{self.bucket_name}/{file_name} (attempt {attempt + 1}/{max_retries})" f"HTTP {exc.status} downloading gs://{self.bucket_name}/{file_name} (attempt {attempt + 1}/{max_retries})",
"WARNING" "WARNING",
{"status": exc.status, "message": str(exc)}
) )
else: else:
log_structured_entry(
f"Non-retryable HTTP error downloading gs://{self.bucket_name}/{file_name}",
"ERROR",
{"status": exc.status, "message": str(exc)}
)
raise raise
else: else:
return file_stream return file_stream
if attempt < max_retries - 1: if attempt < max_retries - 1:
delay = 0.5 * (2**attempt) delay = 0.5 * (2**attempt)
log_structured_entry(
"Retrying file download",
"INFO",
{"file": file_name, "delay_seconds": delay}
)
await asyncio.sleep(delay) await asyncio.sleep(delay)
msg = ( msg = (
f"Failed to download gs://{self.bucket_name}/{file_name} " f"Failed to download gs://{self.bucket_name}/{file_name} "
f"after {max_retries} attempts" f"after {max_retries} attempts"
) )
log_structured_entry(
"File download failed after all retries",
"ERROR",
{
"file": file_name,
"bucket": self.bucket_name,
"max_retries": max_retries,
"last_error": str(last_exception)
}
)
raise TimeoutError(msg) from last_exception raise TimeoutError(msg) from last_exception
@@ -210,7 +253,13 @@ class GoogleCloudVectorSearch:
"Missing endpoint metadata. Call " "Missing endpoint metadata. Call "
"`configure_index_endpoint` before querying." "`configure_index_endpoint` before querying."
) )
log_structured_entry(
"Vector search query failed - endpoint not configured",
"ERROR",
{"error": msg}
)
raise RuntimeError(msg) raise RuntimeError(msg)
domain = self._endpoint_domain domain = self._endpoint_domain
endpoint_id = self._endpoint_name.split("/")[-1] endpoint_id = self._endpoint_name.split("/")[-1]
url = ( url = (
@@ -218,6 +267,18 @@ class GoogleCloudVectorSearch:
f"/locations/{self.location}" f"/locations/{self.location}"
f"/indexEndpoints/{endpoint_id}:findNeighbors" f"/indexEndpoints/{endpoint_id}:findNeighbors"
) )
log_structured_entry(
"Starting vector search query",
"INFO",
{
"deployed_index_id": deployed_index_id,
"neighbor_count": limit,
"endpoint_id": endpoint_id,
"embedding_dimension": len(query)
}
)
payload = { payload = {
"deployed_index_id": deployed_index_id, "deployed_index_id": deployed_index_id,
"queries": [ "queries": [
@@ -228,6 +289,7 @@ class GoogleCloudVectorSearch:
], ],
} }
try:
headers = await self._async_get_auth_headers() headers = await self._async_get_auth_headers()
session = self._get_aio_session() session = self._get_aio_session()
async with session.post( async with session.post(
@@ -238,10 +300,37 @@ class GoogleCloudVectorSearch:
if not response.ok: if not response.ok:
body = await response.text() body = await response.text()
msg = f"findNeighbors returned {response.status}: {body}" msg = f"findNeighbors returned {response.status}: {body}"
log_structured_entry(
"Vector search API request failed",
"ERROR",
{
"status": response.status,
"response_body": body,
"deployed_index_id": deployed_index_id
}
)
raise RuntimeError(msg) raise RuntimeError(msg)
data = await response.json() data = await response.json()
neighbors = data.get("nearestNeighbors", [{}])[0].get("neighbors", []) neighbors = data.get("nearestNeighbors", [{}])[0].get("neighbors", [])
log_structured_entry(
"Vector search API request successful",
"INFO",
{
"neighbors_found": len(neighbors),
"deployed_index_id": deployed_index_id
}
)
if not neighbors:
log_structured_entry(
"No neighbors found in vector search",
"WARNING",
{"deployed_index_id": deployed_index_id}
)
return []
# Fetch content for all neighbors
content_tasks = [] content_tasks = []
for neighbor in neighbors: for neighbor in neighbors:
datapoint_id = neighbor["datapoint"]["datapointId"] datapoint_id = neighbor["datapoint"]["datapointId"]
@@ -250,6 +339,12 @@ class GoogleCloudVectorSearch:
self.storage.async_get_file_stream(file_path), self.storage.async_get_file_stream(file_path),
) )
log_structured_entry(
"Fetching content for search results",
"INFO",
{"file_count": len(content_tasks)}
)
file_streams = await asyncio.gather(*content_tasks) file_streams = await asyncio.gather(*content_tasks)
results: list[SearchResult] = [] results: list[SearchResult] = []
for neighbor, stream in zip( for neighbor, stream in zip(
@@ -264,8 +359,29 @@ class GoogleCloudVectorSearch:
content=stream.read().decode("utf-8"), content=stream.read().decode("utf-8"),
), ),
) )
log_structured_entry(
"Vector search completed successfully",
"INFO",
{
"results_count": len(results),
"deployed_index_id": deployed_index_id
}
)
return results return results
except Exception as e:
log_structured_entry(
"Vector search query failed with exception",
"ERROR",
{
"error": str(e),
"error_type": type(e).__name__,
"deployed_index_id": deployed_index_id
}
)
raise
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
# MCP Server # MCP Server
@@ -284,29 +400,103 @@ class AppContext:
@asynccontextmanager @asynccontextmanager
async def lifespan(_server: FastMCP) -> AsyncIterator[AppContext]: async def lifespan(_server: FastMCP) -> AsyncIterator[AppContext]:
"""Create and configure the vector-search client for the server lifetime.""" """Create and configure the vector-search client for the server lifetime."""
log_structured_entry(
"Initializing MCP server",
"INFO",
{
"project_id": cfg.project_id,
"location": cfg.location,
"bucket": cfg.bucket,
"index_name": cfg.index_name,
}
)
try:
# Initialize vector search client
log_structured_entry("Creating GoogleCloudVectorSearch client", "INFO")
vs = GoogleCloudVectorSearch( vs = GoogleCloudVectorSearch(
project_id=cfg.project_id, project_id=cfg.project_id,
location=cfg.location, location=cfg.location,
bucket=cfg.bucket, bucket=cfg.bucket,
index_name=cfg.index_name, index_name=cfg.index_name,
) )
# Configure endpoint
log_structured_entry(
"Configuring index endpoint",
"INFO",
{
"endpoint_name": cfg.endpoint_name,
"endpoint_domain": cfg.endpoint_domain,
}
)
vs.configure_index_endpoint( vs.configure_index_endpoint(
name=cfg.endpoint_name, name=cfg.endpoint_name,
public_domain=cfg.endpoint_domain, public_domain=cfg.endpoint_domain,
) )
# Initialize GenAI client
log_structured_entry(
"Creating GenAI client",
"INFO",
{"project_id": cfg.project_id, "location": cfg.location}
)
genai_client = genai.Client( genai_client = genai.Client(
vertexai=True, vertexai=True,
project=cfg.project_id, project=cfg.project_id,
location=cfg.location, location=cfg.location,
) )
# Validate credentials by testing a simple operation
log_structured_entry("Validating credentials with test embedding", "INFO")
try:
test_response = await genai_client.aio.models.embed_content(
model=cfg.embedding_model,
contents="test",
config=genai_types.EmbedContentConfig(
task_type="RETRIEVAL_QUERY",
),
)
if test_response and test_response.embeddings:
log_structured_entry(
"Credentials validated successfully",
"INFO",
{"embedding_dimension": len(test_response.embeddings[0].values)}
)
else:
log_structured_entry(
"Credential validation returned empty response",
"WARNING"
)
except Exception as e:
log_structured_entry(
"Failed to validate credentials - embedding test failed",
"ERROR",
{"error": str(e), "error_type": type(e).__name__}
)
raise
log_structured_entry("MCP server initialization complete", "INFO")
yield AppContext( yield AppContext(
vector_search=vs, vector_search=vs,
genai_client=genai_client, genai_client=genai_client,
settings=cfg, settings=cfg,
) )
except Exception as e:
log_structured_entry(
"Failed to initialize MCP server",
"ERROR",
{
"error": str(e),
"error_type": type(e).__name__,
}
)
raise
finally:
log_structured_entry("MCP server lifespan ending", "INFO")
cfg = Settings.model_validate({}) cfg = Settings.model_validate({})
@@ -372,9 +562,9 @@ async def knowledge_search(
"INFO", "INFO",
{ {
"embedding": f"{round((t_embed - t0) * 1000, 1)}ms", "embedding": f"{round((t_embed - t0) * 1000, 1)}ms",
"vector_serach": f"{round((t_search - t_embed) * 1000, 1)}ms", "vector_search": f"{round((t_search - t_embed) * 1000, 1)}ms",
"total": f"{round((t_search - t0) * 1000, 1)}ms", "total": f"{round((t_search - t0) * 1000, 1)}ms",
"chunks": {[s["id"] for s in search_results]} "chunks": [s["id"] for s in search_results]
} }
) )