From d0130f8ef18336ea384646b63c8562f5a2855446 Mon Sep 17 00:00:00 2001 From: lrg913427-dot Date: Sun, 3 May 2026 07:55:39 +0800 Subject: [PATCH] feat: add configurable SDK log level (#3096) Add support for configuring the OpenAI SDK log level via: 1. `openai.set_log_level()` function for programmatic control - Accepts string levels: "debug", "info", "warning", "error", "critical" - Accepts numeric logging levels (e.g. logging.WARNING) - Case-insensitive string matching 2. `OPENAI_LOG_LEVEL` environment variable (new, recommended) - Takes precedence over the legacy `OPENAI_LOG` variable - Supports all standard log level names 3. Extended `OPENAI_LOG` environment variable (backwards compatible) - Now supports "warning", "error", "critical" in addition to "debug" and "info" Closes #3096 --- src/openai/__init__.py | 2 + src/openai/_utils/__init__.py | 1 + src/openai/_utils/_logs.py | 70 +++++++++++++--- tests/test_utils/test_logging.py | 134 +++++++++++++++++++++++++++++++ 4 files changed, 198 insertions(+), 9 deletions(-) diff --git a/src/openai/__init__.py b/src/openai/__init__.py index fc9675a8b5..d8a575f625 100644 --- a/src/openai/__init__.py +++ b/src/openai/__init__.py @@ -38,6 +38,7 @@ ) from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient from ._utils._logs import setup_logging as _setup_logging +from ._utils._logs import set_log_level as set_log_level from ._legacy_response import HttpxBinaryResponseContent as HttpxBinaryResponseContent from .types.websocket_reconnection import ReconnectingEvent, ReconnectingOverrides @@ -80,6 +81,7 @@ "OpenAI", "AsyncOpenAI", "file_from_path", + "set_log_level", "BaseModel", "DEFAULT_TIMEOUT", "DEFAULT_MAX_RETRIES", diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index 52853aaf03..7874cdb7f8 100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -1,4 +1,5 @@ from ._logs import SensitiveHeadersFilter as SensitiveHeadersFilter +from 
._logs import set_log_level as set_log_level from ._path import path_template as path_template from ._sync import asyncify as asyncify from ._proxy import LazyProxy as LazyProxy diff --git a/src/openai/_utils/_logs.py b/src/openai/_utils/_logs.py index 376946933c..66e718a6e0 100644 --- a/src/openai/_utils/_logs.py +++ b/src/openai/_utils/_logs.py @@ -1,5 +1,6 @@ import os import logging +from typing import Dict, Optional, Union from typing_extensions import override from ._utils import is_dict @@ -10,6 +11,16 @@ SENSITIVE_HEADERS = {"api-key", "authorization"} +_LOG_LEVEL_MAP: Dict[str, int] = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "warn": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, + "fatal": logging.CRITICAL, +} + def _basic_config() -> None: # e.g. [2023-10-05 14:12:26 - openai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" @@ -19,16 +30,57 @@ def _basic_config() -> None: ) +def _parse_log_level(value: str) -> Optional[int]: + """Parse a log level string into a logging level constant. + + Accepts standard level names (case-insensitive) and numeric values. + Returns None if the value cannot be parsed. + """ + level = _LOG_LEVEL_MAP.get(value.lower()) + if level is not None: + return level + + # Try numeric values + try: + numeric = int(value) + if 0 <= numeric <= 100: + return numeric + except ValueError: + pass + + return None + + +def set_log_level(level: Union[int, str]) -> None: + """Set the log level for the OpenAI SDK loggers. + + Args: + level: A log level as a string (e.g. "debug", "info", "warning", "error", "critical") + or a numeric logging level (e.g. logging.DEBUG, logging.WARNING). + """ + if isinstance(level, str): + parsed = _parse_log_level(level) + if parsed is None: + raise ValueError( + f"Invalid log level: {level!r}. " + f"Expected one of: {', '.join(sorted(_LOG_LEVEL_MAP.keys()))} or a numeric level." 
+ ) + level = parsed + + _basic_config() + logger.setLevel(level) + httpx_logger.setLevel(level) + + def setup_logging() -> None: - env = os.environ.get("OPENAI_LOG") - if env == "debug": - _basic_config() - logger.setLevel(logging.DEBUG) - httpx_logger.setLevel(logging.DEBUG) - elif env == "info": - _basic_config() - logger.setLevel(logging.INFO) - httpx_logger.setLevel(logging.INFO) + # OPENAI_LOG_LEVEL takes precedence over the legacy OPENAI_LOG env var + env = os.environ.get("OPENAI_LOG_LEVEL") or os.environ.get("OPENAI_LOG") + if env: + parsed = _parse_log_level(env) + if parsed is not None: + _basic_config() + logger.setLevel(parsed) + httpx_logger.setLevel(parsed) class SensitiveHeadersFilter(logging.Filter): diff --git a/tests/test_utils/test_logging.py b/tests/test_utils/test_logging.py index cc018012e2..92e4b77181 100644 --- a/tests/test_utils/test_logging.py +++ b/tests/test_utils/test_logging.py @@ -98,3 +98,137 @@ def test_standard_debug_msg(logger_with_filter: logging.Logger, caplog: pytest.L with caplog.at_level(logging.DEBUG): logger_with_filter.debug("Sending HTTP Request: %s %s", "POST", "chat/completions") assert caplog.messages[0] == "Sending HTTP Request: POST chat/completions" + + +class TestSetLogLevel: + """Tests for set_log_level and setup_logging.""" + + def test_set_log_level_string_debug(self) -> None: + from openai._utils._logs import set_log_level, logger, httpx_logger + + set_log_level("debug") + assert logger.level == logging.DEBUG + assert httpx_logger.level == logging.DEBUG + + def test_set_log_level_string_info(self) -> None: + from openai._utils._logs import set_log_level, logger, httpx_logger + + set_log_level("info") + assert logger.level == logging.INFO + assert httpx_logger.level == logging.INFO + + def test_set_log_level_string_warning(self) -> None: + from openai._utils._logs import set_log_level, logger, httpx_logger + + set_log_level("warning") + assert logger.level == logging.WARNING + assert httpx_logger.level == 
logging.WARNING + + def test_set_log_level_string_warn(self) -> None: + from openai._utils._logs import set_log_level, logger, httpx_logger + + set_log_level("warn") + assert logger.level == logging.WARNING + assert httpx_logger.level == logging.WARNING + + def test_set_log_level_string_error(self) -> None: + from openai._utils._logs import set_log_level, logger, httpx_logger + + set_log_level("error") + assert logger.level == logging.ERROR + assert httpx_logger.level == logging.ERROR + + def test_set_log_level_string_critical(self) -> None: + from openai._utils._logs import set_log_level, logger, httpx_logger + + set_log_level("critical") + assert logger.level == logging.CRITICAL + assert httpx_logger.level == logging.CRITICAL + + def test_set_log_level_case_insensitive(self) -> None: + from openai._utils._logs import set_log_level, logger, httpx_logger + + set_log_level("WARNING") + assert logger.level == logging.WARNING + assert httpx_logger.level == logging.WARNING + + def test_set_log_level_numeric(self) -> None: + from openai._utils._logs import set_log_level, logger, httpx_logger + + set_log_level(logging.WARNING) + assert logger.level == logging.WARNING + assert httpx_logger.level == logging.WARNING + + def test_set_log_level_invalid_string_raises(self) -> None: + from openai._utils._logs import set_log_level + + with pytest.raises(ValueError, match="Invalid log level"): + set_log_level("not_a_level") + + def test_setup_logging_respects_openai_log_level_env( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + from openai._utils._logs import setup_logging, logger, httpx_logger + + monkeypatch.setenv("OPENAI_LOG_LEVEL", "warning") + monkeypatch.delenv("OPENAI_LOG", raising=False) + setup_logging() + assert logger.level == logging.WARNING + assert httpx_logger.level == logging.WARNING + + def test_setup_logging_openai_log_level_takes_precedence( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + from openai._utils._logs import setup_logging, logger, 
httpx_logger + + monkeypatch.setenv("OPENAI_LOG_LEVEL", "error") + monkeypatch.setenv("OPENAI_LOG", "debug") + setup_logging() + assert logger.level == logging.ERROR + assert httpx_logger.level == logging.ERROR + + def test_setup_logging_legacy_openai_log_still_works( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + from openai._utils._logs import setup_logging, logger, httpx_logger + + monkeypatch.delenv("OPENAI_LOG_LEVEL", raising=False) + monkeypatch.setenv("OPENAI_LOG", "info") + setup_logging() + assert logger.level == logging.INFO + assert httpx_logger.level == logging.INFO + + def test_setup_logging_legacy_openai_log_warning( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + from openai._utils._logs import setup_logging, logger, httpx_logger + + monkeypatch.delenv("OPENAI_LOG_LEVEL", raising=False) + monkeypatch.setenv("OPENAI_LOG", "warning") + setup_logging() + assert logger.level == logging.WARNING + assert httpx_logger.level == logging.WARNING + + def test_setup_logging_no_env_does_not_set_level( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + from openai._utils._logs import setup_logging, logger, httpx_logger + + monkeypatch.delenv("OPENAI_LOG_LEVEL", raising=False) + monkeypatch.delenv("OPENAI_LOG", raising=False) + + # Save original levels + orig_logger_level = logger.level + orig_httpx_level = httpx_logger.level + + setup_logging() + + # Levels should not have changed + assert logger.level == orig_logger_level + assert httpx_logger.level == orig_httpx_level + + def test_set_log_level_importable_from_openai(self) -> None: + import openai + + assert hasattr(openai, "set_log_level") + assert callable(openai.set_log_level)