Complete logging fixes for all statements in threading service
Applied the extra parameter pattern to the remaining 11 logging statements so the file stays consistent and no code path raises a runtime error when it executes. This completes the fix for the entire file.
parent 43d83a08d3
commit 468463997d
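For reference, a minimal sketch of the pattern this commit applies. It assumes logfire_logger behaves like a standard-library logging.Logger, which accepts only a fixed set of keyword arguments (exc_info, stack_info, stacklevel, extra) and raises TypeError on anything else; the logger name and values below are illustrative only.

import logging

# Assumption: logfire_logger is a plain stdlib logger (or a thin wrapper over one).
logfire_logger = logging.getLogger("threading_service")

estimated_tokens = 1200  # hypothetical values for illustration
wait_time = 2.5

# Before: arbitrary keyword arguments are rejected by Logger.info(), so this
# call raises TypeError as soon as the code path runs.
# logfire_logger.info(f"Rate limiting: waiting {wait_time:.1f}s", tokens=estimated_tokens)

# After: structured fields go through the supported `extra` mapping, which
# attaches them as attributes on the emitted LogRecord.
logfire_logger.info(
    f"Rate limiting: waiting {wait_time:.1f}s",
    extra={"tokens": estimated_tokens},
)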
@@ -98,8 +98,10 @@ class RateLimiter:
         if wait_time > 0:
             logfire_logger.info(
                 f"Rate limiting: waiting {wait_time:.1f}s",
-                tokens=estimated_tokens,
-                current_usage=self._get_current_usage(),
+                extra={
+                    "tokens": estimated_tokens,
+                    "current_usage": self._get_current_usage(),
+                }
             )
             await asyncio.sleep(wait_time)
             return await self.acquire(estimated_tokens)
@@ -199,16 +201,20 @@ class MemoryAdaptiveDispatcher:
             workers = max(1, base // 2)
             logfire_logger.warning(
                 "High memory usage detected, reducing workers",
-                memory_percent=metrics.memory_percent,
-                workers=workers,
+                extra={
+                    "memory_percent": metrics.memory_percent,
+                    "workers": workers,
+                }
             )
         elif metrics.cpu_percent > self.config.cpu_threshold * 100:
             # Reduce workers when CPU is high
             workers = max(1, base // 2)
             logfire_logger.warning(
                 "High CPU usage detected, reducing workers",
-                cpu_percent=metrics.cpu_percent,
-                workers=workers,
+                extra={
+                    "cpu_percent": metrics.cpu_percent,
+                    "workers": workers,
+                }
             )
         elif metrics.memory_percent < 50 and metrics.cpu_percent < 50:
             # Increase workers when resources are available
@@ -239,11 +245,13 @@ class MemoryAdaptiveDispatcher:

         logfire_logger.info(
             "Starting adaptive processing",
-            items_count=len(items),
-            workers=optimal_workers,
-            mode=mode,
-            memory_percent=self.last_metrics.memory_percent,
-            cpu_percent=self.last_metrics.cpu_percent,
+            extra={
+                "items_count": len(items),
+                "workers": optimal_workers,
+                "mode": mode,
+                "memory_percent": self.last_metrics.memory_percent,
+                "cpu_percent": self.last_metrics.cpu_percent,
+            }
         )

         # Track active workers
@@ -318,7 +326,8 @@ class MemoryAdaptiveDispatcher:
                     del active_workers[worker_id]

                 logfire_logger.error(
-                    f"Processing failed for item {index}", error=str(e), item_index=index
+                    f"Processing failed for item {index}",
+                    extra={"error": str(e), "item_index": index}
                 )
                 return None

@@ -334,10 +343,12 @@ class MemoryAdaptiveDispatcher:
         success_rate = len(successful_results) / len(items) * 100
         logfire_logger.info(
             "Adaptive processing completed",
-            total_items=len(items),
-            successful=len(successful_results),
-            success_rate=f"{success_rate:.1f}%",
-            workers_used=optimal_workers,
+            extra={
+                "total_items": len(items),
+                "successful": len(successful_results),
+                "success_rate": f"{success_rate:.1f}%",
+                "workers_used": optimal_workers,
+            }
         )

         return successful_results
@@ -355,7 +366,8 @@ class WebSocketSafeProcessor:
         await websocket.accept()
         self.active_connections.append(websocket)
         logfire_logger.info(
-            "WebSocket client connected", total_connections=len(self.active_connections)
+            "WebSocket client connected",
+            extra={"total_connections": len(self.active_connections)}
         )

     def disconnect(self, websocket: WebSocket):
@@ -363,7 +375,8 @@ class WebSocketSafeProcessor:
         if websocket in self.active_connections:
             self.active_connections.remove(websocket)
             logfire_logger.info(
-                "WebSocket client disconnected", remaining_connections=len(self.active_connections)
+                "WebSocket client disconnected",
+                extra={"remaining_connections": len(self.active_connections)}
             )

     async def broadcast_progress(self, message: dict[str, Any]):
@@ -564,17 +577,20 @@ class ThreadingService:
             # Log system metrics
             logfire_logger.info(
                 "System health check",
-                memory_percent=metrics.memory_percent,
-                cpu_percent=metrics.cpu_percent,
-                available_memory_gb=metrics.available_memory_gb,
-                active_threads=metrics.active_threads,
-                active_websockets=len(self.websocket_processor.active_connections),
+                extra={
+                    "memory_percent": metrics.memory_percent,
+                    "cpu_percent": metrics.cpu_percent,
+                    "available_memory_gb": metrics.available_memory_gb,
+                    "active_threads": metrics.active_threads,
+                    "active_websockets": len(self.websocket_processor.active_connections),
+                }
             )

             # Alert on critical thresholds
             if metrics.memory_percent > 90:
                 logfire_logger.warning(
-                    "Critical memory usage", memory_percent=metrics.memory_percent
+                    "Critical memory usage",
+                    extra={"memory_percent": metrics.memory_percent}
                 )
                 # Force garbage collection
                 gc.collect()
@@ -588,8 +604,10 @@ class ThreadingService:
             if metrics.active_threads > self.config.max_workers * 3:
                 logfire_logger.warning(
                     "High thread count detected",
-                    active_threads=metrics.active_threads,
-                    max_expected=self.config.max_workers * 3,
+                    extra={
+                        "active_threads": metrics.active_threads,
+                        "max_expected": self.config.max_workers * 3,
+                    }
                 )

             await asyncio.sleep(self.config.health_check_interval)