import asyncio
import logging
from datetime import datetime
from http import HTTPStatus
from typing import TYPE_CHECKING, Optional, Union

import aiohttp

if TYPE_CHECKING:
    from logging import _Level, LogRecord

# Embed colours, keyed by both level name and numeric level, since lookups
# below are done with either form of key.
_log_level_to_discord_colors = {
    "CRITICAL": 0xFF0000,
    "ERROR": 0xCC0000,
    "WARNING": 0xFFCC00,
    logging.CRITICAL: 0xFF0000,
    logging.ERROR: 0xCC0000,
    logging.WARNING: 0xFFCC00,
}


class DiscordLogHandler(logging.Handler):
    """
    A handler for Python's logging infrastructure that batches warning-level
    records into periodic summaries and sends error/critical logs to a
    Discord webhook immediately.
    """

    def __init__(self, webhook: str, who_to_ping: Optional[list[Union[str, int]]] = None) -> None:
        super().__init__(logging.WARNING)
        self._webhook = webhook
        self._who_to_ping = who_to_ping or []
        self._client: Optional[aiohttp.ClientSession] = None
        self._is_bucketing = False
        self._bucket_start = datetime.now()
        self._bucket_data: dict["_Level", list["LogRecord"]] = {
            logging.WARNING: [],
        }
        self._tasks: set[asyncio.Task] = set()

    def emit(self, record: "LogRecord") -> None:
        # Errors and criticals are forwarded immediately.
        if record.levelno in {logging.CRITICAL, logging.ERROR}:
            return self._send_log_to_discord(record)

        # Everything else is batched per level; levels without a bucket are dropped.
        if record.levelno not in self._bucket_data:
            return

        self._bucket_data[record.levelno].append(record)

        if not self._is_bucketing:
            self._is_bucketing = True
            self._bucket_start = datetime.now()

            async def schedule():
                # Flush the bucket after a minute of collecting records.
                await asyncio.sleep(60)
                self._send_bucket_data()

            t = asyncio.ensure_future(schedule())
            self._tasks.add(t)
            t.add_done_callback(self._tasks.discard)

    def _send_bucket_data(self):
        loglevels = sorted(self._bucket_data.keys(), reverse=True)
        log_lines = []
        log_counts: dict[int, int] = {}

        for level in loglevels:
            log_counts[level] = 0
            for record in self._bucket_data[level]:
                # An optional "title" extra overrides the logger name in the summary.
                record_title = record.__dict__.get("title", record.name)
                log_counts[level] += 1
                log_lines.append(f"{record_title} | {record.levelname} | {record.getMessage()}")

        log_summary = "\n".join(log_lines)
        # Discord message content is capped at 2000 characters; the code fence adds 6.
        if len(log_summary) > 1994:
            log_summary = f"{log_summary[:1991]}..."

        body = {
            "content": f"```{log_summary}```",
            "embeds": [
                {
                    "title": "ARTEMiS log summary",
                    "fields": [
                        {"name": logging.getLevelName(level), "value": str(count), "inline": True}
                        for level, count in log_counts.items()
                    ],
                    "description": f"Log summary for {self._bucket_start.isoformat()} to {datetime.now().isoformat()}",
                    "color": _log_level_to_discord_colors.get(loglevels[0], 0x000000),
                    "timestamp": datetime.now().isoformat(),
                }
            ],
        }

        self._post_data(body)
        self._is_bucketing = False
        for level in loglevels:
            self._bucket_data[level] = []

    def _send_log_to_discord(self, record: "LogRecord"):
        message = record.getMessage()
        # Discord embed descriptions are capped at 4096 characters; the code fence adds 6.
        if len(message) > 4090:
            message = message[:4087] + "..."

        body = {
            "content": "",
            "embeds": [
                {
                    "title": f"[{record.levelname}] {record.__dict__.get('title', record.name)}",
                    "description": f"```{message}```",
                    "color": _log_level_to_discord_colors.get(record.levelname, 0x000000),
                    "timestamp": datetime.fromtimestamp(record.created).isoformat(),
                },
            ],
        }

        # Ping the configured users for anything at ERROR or above.
        if record.levelno == logging.CRITICAL:
            body["content"] = f"CRITICAL ERROR: {self._get_pings()}"
        if record.levelno == logging.ERROR:
            body["content"] = f"ERROR: {self._get_pings()}"

        self._post_data(body)

    def _get_pings(self) -> str:
        pings = []
        for ping in self._who_to_ping:
            if isinstance(ping, int):
                # Numeric IDs are turned into Discord user mentions.
                pings.append(f"<@{ping}>")
            else:
                pings.append(str(ping))
        return " ".join(pings)

    def _post_data(self, body: dict):
        t = asyncio.ensure_future(self._post_data_inner(body))
        self._tasks.add(t)
        t.add_done_callback(self._tasks.discard)

    async def _post_data_inner(self, body: dict):
        scale_retry_debounce = 2

        if self._client is None:
            self._client = aiohttp.ClientSession()

        while True:
            response = await self._client.post(self._webhook, json=body)

            if response.status == HTTPStatus.TOO_MANY_REQUESTS:
                # Honour Discord's rate limit, backing off harder on each retry.
                data = await response.json()
                retry_after = data.get("retry_after")
                if isinstance(retry_after, (int, float)):
                    await asyncio.sleep(retry_after * scale_retry_debounce)
                    scale_retry_debounce = scale_retry_debounce ** 2
                continue
            elif not response.ok:
                print(f"[ERROR] Failed to send log to Discord: {response.status} {await response.text()}")
                break
            else:
                break
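

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: the webhook URL and the
    # user ID below are placeholders. The handler is attached inside a running event
    # loop, since emit() schedules webhook delivery with asyncio.ensure_future().
    async def _demo() -> None:
        handler = DiscordLogHandler(
            "https://discord.com/api/webhooks/<id>/<token>",  # placeholder webhook URL
            who_to_ping=[123456789012345678],  # placeholder Discord user ID
        )
        logger = logging.getLogger("artemis.demo")
        logger.setLevel(logging.WARNING)
        logger.addHandler(handler)

        logger.warning("Batched; flushed in a summary roughly a minute later")
        logger.error("Sent to the webhook immediately")

        # Give the scheduled webhook task a moment to run before the loop closes.
        await asyncio.sleep(5)

    asyncio.run(_demo())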