diff --git a/codecarbon/core/neuron.py b/codecarbon/core/neuron.py
new file mode 100644
index 000000000..80bfe8741
--- /dev/null
+++ b/codecarbon/core/neuron.py
@@ -0,0 +1,284 @@
+"""
+Implements tracking for AWS Inferentia and Inferentia2 AI accelerator chips
+via the Neuron sysfs interface.
+
+Sysfs power file location:
+/sys/devices/virtual/neuron_device/neuron{i}/stats/power/utilization
+
+Sysfs power file format:
+<status>,<counter>,<min_pct>,<max_pct>,<avg_pct>
+(field names reconstructed from the parser below; the second field is
+unused by this module -- confirm against the Neuron driver documentation)
+
+Where power values are percentages (0.00-100.00) of max TDP.
+Updated every 60 seconds by the Neuron driver.
+
+IMPORTANT - Sampling frequency limitation:
+The Neuron sysfs power file updates every 60 seconds.
+codecarbon reads it every 15 seconds by default, meaning
+the same value may be read up to 4 times between updates.
+
+Impact:
+- Steady workloads: minimal impact, power is relatively constant
+- Bursty workloads: may miss power spikes between updates
+- Runs < 60 seconds: energy estimate may be based on a single sample
+- Long runs: averages out over time, impact diminishes
+
+NOTE: Power is reported at device level, not per-process.
+Accurate for exclusive instances, approximate for shared Neuron cores.
+"""
+
+import glob
+import os
+from typing import Dict, List, Optional, Tuple
+
+from codecarbon.external.logger import logger
+
+# Maximum TDP per device type in watts.
+# Only Inferentia (inf1) and Inferentia2 (inf2) are currently supported.
+# Add other devices when their power specs are properly researched.
+# TDP values are approximate and used to estimate watts from utilization%.
+NEURON_DEVICE_TDP_WATTS = {
+    # long format from device_name sysfs file
+    "inferentia": 75,
+    "inferentia2": 100,
+    # shorthand format from instance_type sysfs file
+    "inf1": 75,
+    "inf2": 100,
+}
+
+
+def is_neuron_system() -> bool:
+    """
+    Check if AWS Inferentia/Inferentia2 Neuron device is available
+    by checking if the Neuron sysfs directory exists.
+    Returns True if Neuron devices are present, False otherwise.
+    """
+    return os.path.exists("/sys/devices/virtual/neuron_device")
+
+
+class NeuronDevice:
+    """
+    Represents a single AWS Inferentia/Inferentia2 Neuron device.
+
+    Reads power utilization from Neuron sysfs at:
+    /sys/devices/virtual/neuron_device/neuron{i}/stats/power/utilization
+
+    Power is reported as a percentage of max TDP, updated every 60 seconds.
+    Watts are estimated by multiplying utilization% by the device TDP.
+
+    Accuracy limitations:
+    - Power derived from utilization% x TDP, not directly measured
+    - sysfs updates every 60 seconds, codecarbon reads every 15 seconds
+    - Device-level power only, not per-process attribution
+    - TDP values are approximate, not officially confirmed by AWS
+      for power tracking purposes
+    """
+
+    def __init__(self, device_path: str, device_index: int):
+        self._device_path = device_path
+        self._device_index = device_index
+        self._max_power_watts = self._get_max_power_watts()
+
+    def _get_max_power_watts(self) -> float:
+        """
+        Look up device TDP by reading device_name, instance_type,
+        or arch_type from the sysfs info directory.
+        Tries each file in order, returns first match.
+        Returns 0.0 if device is not supported or file cannot be read.
+        """
+        try:
+            for filename in ["device_name", "instance_type", "arch_type"]:
+                path = os.path.join(
+                    self._device_path, "info", "architecture", filename
+                )
+                if not os.path.exists(path):
+                    continue
+                with open(path, "r") as f:
+                    name = f.read().strip().lower()
+                tdp = NEURON_DEVICE_TDP_WATTS.get(name, 0.0)
+                if tdp > 0:
+                    logger.debug(
+                        f"NeuronDevice {self._device_index}: "
+                        f"{filename}='{name}', TDP={tdp}W"
+                    )
+                    return tdp
+                else:
+                    logger.warning(
+                        f"NeuronDevice {self._device_index}: "
+                        f"device '{name}' is not currently supported. "
+                        "Only Inferentia (inf1) and Inferentia2 (inf2) "
+                        "are supported. Power will be reported as 0.0W."
+                    )
+                    return 0.0
+            logger.warning(
+                f"NeuronDevice {self._device_index}: "
+                "could not determine device type from sysfs info directory."
+            )
+            return 0.0
+        except Exception as e:
+            logger.debug(
+                f"NeuronDevice {self._device_index}: "
+                f"could not read device info: {e}"
+            )
+            return 0.0
+
+    def _read_power_file(self) -> Optional[Tuple[str, float, float, float]]:
+        """
+        Read and parse the Neuron sysfs power utilization file.
+
+        Format: <status>,<counter>,<min_pct>,<max_pct>,<avg_pct>
+        (the second field is ignored by this parser)
+
+        Returns (status, min_pct, max_pct, avg_pct) or None on error.
+        """
+        try:
+            power_file = os.path.join(
+                self._device_path, "stats", "power", "utilization"
+            )
+            if not os.path.exists(power_file):
+                logger.debug(
+                    f"NeuronDevice {self._device_index}: "
+                    f"power file not found at {power_file}"
+                )
+                return None
+
+            with open(power_file, "r") as f:
+                content = f.read().strip()
+
+            parts = content.split(",")
+            if len(parts) != 5:
+                logger.debug(
+                    f"NeuronDevice {self._device_index}: "
+                    f"unexpected power file format: {content}"
+                )
+                return None
+
+            status, _, min_pct, max_pct, avg_pct = parts
+            return status, float(min_pct), float(max_pct), float(avg_pct)
+
+        except Exception as e:
+            logger.debug(
+                f"NeuronDevice {self._device_index}: "
+                f"could not read power file: {e}"
+            )
+            return None
+
+    def get_utilization_pct(self) -> float:
+        """
+        Returns the raw average power utilization percentage (0.00-100.00)
+        as reported directly by the Neuron sysfs interface.
+        This is the direct measured value with no estimation involved.
+        Returns 0.0 if status is not POWER_STATUS_VALID or on error.
+        """
+        result = self._read_power_file()
+        if result is None:
+            return 0.0
+
+        status, _, _, avg_pct = result
+
+        if status != "POWER_STATUS_VALID":
+            logger.debug(
+                f"NeuronDevice {self._device_index}: "
+                f"power status: {status}, returning 0.0%"
+            )
+            return 0.0
+
+        logger.debug(
+            f"NeuronDevice {self._device_index}: utilization={avg_pct:.2f}%"
+        )
+        return avg_pct
+
+    def get_power_watts(self) -> float:
+        """
+        Returns estimated power in watts by multiplying utilization%
+        by the device TDP.
+
+        NOTE: This is an estimation. For the raw measured value
+        use get_utilization_pct() instead.
+        Returns 0.0 if TDP is unknown or status is not POWER_STATUS_VALID.
+        """
+        if self._max_power_watts == 0.0:
+            logger.debug(
+                f"NeuronDevice {self._device_index}: "
+                "TDP unknown, cannot estimate watts"
+            )
+            return 0.0
+
+        result = self._read_power_file()
+        if result is None:
+            return 0.0
+
+        status, _, _, avg_pct = result
+
+        if status != "POWER_STATUS_VALID":
+            logger.debug(
+                f"NeuronDevice {self._device_index}: "
+                f"power status: {status}, returning 0.0W"
+            )
+            return 0.0
+
+        watts = (avg_pct / 100.0) * self._max_power_watts
+        logger.debug(
+            f"NeuronDevice {self._device_index}: "
+            f"avg={avg_pct:.2f}%, TDP={self._max_power_watts}W "
+            f"=> {watts:.2f}W"
+        )
+        return watts
+
+    def get_device_index(self) -> int:
+        return self._device_index
+
+
+class AllNeuronDevices:
+    """
+    Discovers and manages all AWS Inferentia/Inferentia2 Neuron devices
+    on the system by scanning the Neuron sysfs directory.
+    """
+
+    def __init__(self):
+        self._devices: List[NeuronDevice] = self._discover_devices()
+        logger.info(f"Found {len(self._devices)} Neuron device(s)")
+
+    def _discover_devices(self) -> List[NeuronDevice]:
+        """
+        Scan sysfs for Neuron devices and return a sorted list
+        of NeuronDevice objects.
+        Uses neuron[0-9]* glob to avoid matching neuron_core directories.
+        """
+        base_path = "/sys/devices/virtual/neuron_device"
+        device_paths = sorted(glob.glob(os.path.join(base_path, "neuron[0-9]*")))
+        devices = []
+        for i, path in enumerate(device_paths):
+            if os.path.isdir(path):
+                devices.append(NeuronDevice(path, i))
+                logger.info(f"Neuron device {i} found at {path}")
+        return devices
+
+    @property
+    def device_count(self) -> int:
+        return len(self._devices)
+
+    def get_total_power_watts(self) -> float:
+        """
+        Sum estimated power in watts across all Neuron devices.
+        See NeuronDevice.get_power_watts() for accuracy limitations.
+        """
+        return sum(d.get_power_watts() for d in self._devices)
+
+    def get_total_utilization_pct(self) -> float:
+        """
+        Average raw utilization percentage across all Neuron devices.
+        This is the direct measured value with no estimation involved.
+        Returns 0.0 if no devices are present.
+        """
+        if not self._devices:
+            return 0.0
+        return sum(d.get_utilization_pct() for d in self._devices) / len(self._devices)
+
+    def get_device_details(self) -> List[Dict]:
+        """
+        Return a list of dicts with per-device power and utilization.
+        """
+        return [
+            {
+                "device_index": d.get_device_index(),
+                "power_watts": d.get_power_watts(),
+                "utilization_pct": d.get_utilization_pct(),
+            }
+            for d in self._devices
+        ]
diff --git a/codecarbon/core/resource_tracker.py b/codecarbon/core/resource_tracker.py
index 67786189d..568ee0ec7 100644
--- a/codecarbon/core/resource_tracker.py
+++ b/codecarbon/core/resource_tracker.py
@@ -3,6 +3,7 @@
 from codecarbon.core import cpu, gpu, powermetrics
 from codecarbon.core.config import normalize_gpu_ids
+from codecarbon.core.neuron import is_neuron_system
 from codecarbon.core.util import (
     detect_cpu_model,
     is_linux_os,
@@ -10,13 +11,19 @@
     is_mac_os,
     is_windows_os,
 )
-from codecarbon.external.hardware import CPU, GPU, MODE_CPU_LOAD, AppleSiliconChip
+from codecarbon.external.hardware import (
+    CPU,
+    GPU,
+    MODE_CPU_LOAD,
+    AppleSiliconChip,
+    NeuronChip,
+)
 from codecarbon.external.logger import logger
 from codecarbon.external.ram import RAM
 
 
 class ResourceTracker:
-    cpu_tracker = gpu_tracker = ram_tracker = "Unspecified"
+    cpu_tracker = gpu_tracker = ram_tracker = neuron_tracker = "Unspecified"
 
     def __init__(self, tracker):
         self.tracker = tracker
@@ -250,6 +257,21 @@ def set_GPU_tracking(self):
         self.tracker._conf.setdefault("gpu_count", 0)
         self.tracker._conf.setdefault("gpu_model", "")
 
+    def set_Neuron_tracking(self):
+        logger.info("[setup] Neuron Tracking...")
+        if is_neuron_system():
+            logger.info("Tracking AWS Inferentia/Inferentia2 via Neuron sysfs")
+            neuron = NeuronChip()
+            self.tracker._hardware.append(neuron)
+            self.tracker._conf["neuron_count"] = neuron._devices.device_count
+            self.tracker._conf["neuron_model"] = neuron._model
+            self.neuron_tracker = "Neuron sysfs"
+        else:
+            logger.info("No Neuron device found.")
+            self.tracker._conf.setdefault("neuron_count", 0)
+            self.tracker._conf.setdefault("neuron_model", "")
+            self.neuron_tracker = "Unspecified"
+
     def set_CPU_GPU_ram_tracking(self):
         """
         Set up CPU, GPU and RAM tracking based on the user's configuration.
@@ -258,11 +280,13 @@ def set_CPU_GPU_ram_tracking(self):
         self.set_RAM_tracking()
         self.set_CPU_tracking()
         self.set_GPU_tracking()
+        self.set_Neuron_tracking()
         logger.info(
             f"""The below tracking methods have been set up:
             RAM Tracking Method: {self.ram_tracker}
             CPU Tracking Method: {self.cpu_tracker}
             GPU Tracking Method: {self.gpu_tracker}
+            Neuron Tracking Method: {self.neuron_tracker}
             """
         )
diff --git a/codecarbon/emissions_tracker.py b/codecarbon/emissions_tracker.py
index 862eba2b4..bad2628ce 100644
--- a/codecarbon/emissions_tracker.py
+++ b/codecarbon/emissions_tracker.py
@@ -23,7 +23,7 @@
 from codecarbon.core.units import Energy, Power, Time, Water
 from codecarbon.core.util import count_cpus, count_physical_cpus, suppress
 from codecarbon.external.geography import CloudMetadata, GeoMetadata
-from codecarbon.external.hardware import CPU, GPU, AppleSiliconChip
+from codecarbon.external.hardware import CPU, GPU, AppleSiliconChip, NeuronChip
 from codecarbon.external.logger import logger, set_logger_format, set_logger_level
 from codecarbon.external.ram import RAM
 from codecarbon.external.scheduler import PeriodicScheduler
@@ -368,12 +368,18 @@ def __init__(
         self._gpu_utilization_history: List[float] = []
         self._ram_utilization_history: List[float] = []
         self._ram_used_history: List[float] = []
+        self._cpu_temperature_history: List[float] = []
+        self._gpu_temperature_history: List[float] = []
         self._total_cpu_energy: Energy = Energy.from_energy(kWh=0)
         self._total_gpu_energy: Energy = Energy.from_energy(kWh=0)
         self._total_ram_energy: Energy = Energy.from_energy(kWh=0)
         self._cpu_power: Power = Power.from_watts(watts=0)
         self._gpu_power: Power = Power.from_watts(watts=0)
         self._ram_power: Power = Power.from_watts(watts=0)
+        self._total_neuron_energy: Energy = Energy.from_energy(kWh=0)
+        self._neuron_power: Power = Power.from_watts(watts=0)
+        self._neuron_power_sum: float = 0.0
+        self._neuron_utilization_history: List[float] = []
         # Running average tracking for power
         self._cpu_power_sum: float = 0.0
         self._gpu_power_sum: float = 0.0
@@ -548,6 +554,9 @@ def start(self) -> None:
         self._ram_utilization_history.clear()
         self._ram_used_history.clear()
         self._gpu_utilization_history.clear()
+        self._cpu_temperature_history.clear()
+        self._gpu_temperature_history.clear()
+        self._neuron_utilization_history.clear()
 
         # Read initial energy for hardware
         for hardware in self._hardware:
@@ -598,6 +607,9 @@ def start_task(self, task_name=None) -> None:
         self._ram_utilization_history.clear()
         self._ram_used_history.clear()
         self._gpu_utilization_history.clear()
+        self._cpu_temperature_history.clear()
+        self._gpu_temperature_history.clear()
+        self._neuron_utilization_history.clear()
 
         # Read initial energy for hardware
         for hardware in self._hardware:
@@ -922,6 +934,28 @@ def _prepare_emissions_data(self) -> EmissionsData:
             tracking_mode=self._conf.get("tracking_mode"),
             pue=self._pue,
             wue=self._wue,
+            cpu_temperature=(
+                sum(self._cpu_temperature_history) / len(self._cpu_temperature_history)
+                if self._cpu_temperature_history
+                else 0.0
+            ),
+            gpu_temperature=(
+                sum(self._gpu_temperature_history) / len(self._gpu_temperature_history)
+                if self._gpu_temperature_history
+                else 0.0
+            ),
+            neuron_power=(
+                self._neuron_power_sum / self._power_measurement_count
+                if self._power_measurement_count > 0
+                else self._neuron_power.W
+            ),
+            neuron_energy=self._total_neuron_energy.kWh,
+            neuron_utilization_pct=(
+                sum(self._neuron_utilization_history)
+                / len(self._neuron_utilization_history)
+                if self._neuron_utilization_history
+                else 0.0
+            ),
         )
         logger.debug(total_emissions)
         return total_emissions
@@ -973,6 +1007,10 @@ def _monitor_power(self) -> None:
         self._ram_utilization_history.append(psutil.virtual_memory().percent)
         self._ram_used_history.append(psutil.virtual_memory().used / (1024**3))
 
+        for hardware in self._hardware:
+            if isinstance(hardware, CPU):
+                self._cpu_temperature_history.append(hardware.get_cpu_temperature())
+
         # Collect GPU utilization metrics
         for hardware in self._hardware:
             if isinstance(hardware, GPU):
@@ -980,13 +1018,23 @@ def _monitor_power(self) -> None:
                 gpu_details = hardware.devices.get_gpu_details()
                 for gpu_index, gpu_detail in enumerate(gpu_details):
                     resolved_gpu_index = gpu_detail.get("gpu_index", gpu_index)
-                    if (
-                        resolved_gpu_index in gpu_ids_to_monitor
-                        and "gpu_utilization" in gpu_detail
-                    ):
-                        self._gpu_utilization_history.append(
-                            gpu_detail["gpu_utilization"]
-                        )
+                    if resolved_gpu_index in gpu_ids_to_monitor:
+
+                        if "gpu_utilization" in gpu_detail:
+                            self._gpu_utilization_history.append(
+                                gpu_detail["gpu_utilization"]
+                            )
+
+                        if "temperature" in gpu_detail:
+                            self._gpu_temperature_history.append(
+                                gpu_detail["temperature"]
+                            )
+
+        for hardware in self._hardware:
+            if isinstance(hardware, NeuronChip):
+                self._neuron_utilization_history.append(
+                    hardware._devices.get_total_utilization_pct()
+                )
 
     def _do_measurements(self) -> None:
         for hardware in self._hardware:
@@ -1052,6 +1100,14 @@ def _do_measurements(self) -> None:
                     f"Energy consumed for all AppleSilicon GPUs : {self._total_gpu_energy.kWh:.6f} kWh"
                     + f". Total GPU Power : {self._gpu_power.W} W"
                 )
+            elif isinstance(hardware, NeuronChip):
+                self._total_neuron_energy += energy
+                self._neuron_power = power
+                self._neuron_power_sum += power.W
+                logger.info(
+                    f"Energy consumed for Neuron : {self._total_neuron_energy.kWh:.6f} kWh"
+                    + f". Neuron Power : {self._neuron_power.W} W"
+                )
             else:
                 logger.error(f"Unknown hardware type: {hardware} ({type(hardware)})")
             h_time = time.perf_counter() - h_time
diff --git a/codecarbon/external/hardware.py b/codecarbon/external/hardware.py
index 8ac4de8f8..10b194222 100644
--- a/codecarbon/external/hardware.py
+++ b/codecarbon/external/hardware.py
@@ -13,6 +13,7 @@
 from codecarbon.core.cpu import IntelPowerGadget, IntelRAPL
 from codecarbon.core.gpu import AllGPUDevices
+from codecarbon.core.neuron import AllNeuronDevices
 from codecarbon.core.powermetrics import ApplePowermetrics
 from codecarbon.core.units import Energy, Power, Time
 from codecarbon.core.util import count_cpus, detect_cpu_model
@@ -409,6 +410,40 @@ def monitor_power(self):
         cpu_power = self._get_power_from_cpus()
         self._power_history.append(cpu_power)
 
+    def get_cpu_temperature(self) -> float:
+        """
+        Get average CPU temperature in Celsius.
+        Supported on Linux (Intel + AMD) and Windows Intel via Power Gadget.
+        Returns 0.0 if temperature cannot be read on the current platform.
+        """
+        try:
+            if self._mode == "intel_power_gadget":
+                all_cpu_details = self._intel_interface.get_cpu_details()
+                for metric, value in all_cpu_details.items():
+                    if re.match(r"^CPU Temperature", metric):
+                        return float(value)
+                return 0.0
+
+            elif self._mode in ["intel_rapl", MODE_CPU_LOAD, "constant"]:
+                temps = psutil.sensors_temperatures()
+                if not temps:
+                    logger.debug(
+                        "get_cpu_temperature: psutil.sensors_temperatures() "
+                        "returned no data on this platform"
+                    )
+                    return 0.0
+                for key in ["coretemp", "k10temp", "cpu_thermal"]:
+                    if key in temps:
+                        readings = temps[key]
+                        avg = sum(r.current for r in readings) / len(readings)
+                        logger.debug(f"get_cpu_temperature: {key} avg = {avg:.1f}°C")
+                        return avg
+                return 0.0
+
+        except Exception as e:
+            logger.debug(f"get_cpu_temperature: Could not read CPU temperature: {e}")
+            return 0.0
+
     def get_model(self):
         return self._model
 
@@ -522,3 +557,47 @@ def from_utils(
             logger.warning("Could not read AppleSiliconChip model.")
 
         return cls(output_dir=output_dir, model=model, chip_part=chip_part)
+
+
+@dataclass
+class NeuronChip(BaseHardware):
+    """
+    Tracks AWS Inferentia/Inferentia2 power consumption
+    via the Neuron sysfs interface.
+
+    Power is estimated from utilization% x TDP.
+    Utilization% is the raw measured value from sysfs.
+
+    Sampling limitation: Neuron sysfs updates every 60 seconds.
+    codecarbon reads every 15 seconds so the same value may be
+    read up to 4 times between updates. Energy estimates are most
+    accurate for steady workloads and runs longer than 60 seconds.
+
+    NOTE: Neuron sysfs reports device-level power, not per-process.
+    Accurate for exclusive instances, approximate for shared Neuron cores.
+    """
+
+    def __init__(self):
+        self._devices = AllNeuronDevices()
+        self._model = "AWS Inferentia/Inferentia2"
+        logger.warning(
+            "Neuron power sysfs updates every 60 seconds. "
+            "codecarbon reads every 15 seconds so power readings "
+            "may be stale between updates. Energy estimates are most "
+            "accurate for runs longer than 60 seconds with steady workloads."
+        )
+
+    def __repr__(self) -> str:
+        return f"NeuronChip({self._model}, {self._devices.device_count} device(s))"
+
+    def total_power(self) -> Power:
+        """
+        Returns total estimated power across all Neuron devices in watts.
+        Called every 15 seconds by _do_measurements() in tracker.py.
+        Power is estimated from utilization% x TDP.
+        """
+        watts = self._devices.get_total_power_watts()
+        return Power.from_watts(watts)
+
+    def description(self) -> str:
+        return repr(self)
diff --git a/codecarbon/output_methods/emissions_data.py b/codecarbon/output_methods/emissions_data.py
index 17544aa51..38e2a92b0 100644
--- a/codecarbon/output_methods/emissions_data.py
+++ b/codecarbon/output_methods/emissions_data.py
@@ -47,6 +47,11 @@ class EmissionsData:
     on_cloud: str = "N"
     pue: float = 1
     wue: float = 0
+    cpu_temperature: float = 0.0
+    gpu_temperature: float = 0.0
+    neuron_power: float = 0.0
+    neuron_energy: float = 0.0
+    neuron_utilization_pct: float = 0.0
 
     @property
     def values(self) -> OrderedDict:
@@ -110,6 +115,11 @@ class TaskEmissionsData:
     ram_utilization_percent: float = 0.0
     ram_used_gb: float = 0.0
     on_cloud: str = "N"
+    cpu_temperature: float = 0.0
+    gpu_temperature: float = 0.0
+    neuron_power: float = 0.0
+    neuron_energy: float = 0.0
+    neuron_utilization_pct: float = 0.0
 
     @property
     def values(self) -> OrderedDict:
diff --git a/docs/Contributions/cputemp.md b/docs/Contributions/cputemp.md
new file mode 100644
index 000000000..c656cb258
--- /dev/null
+++ b/docs/Contributions/cputemp.md
@@ -0,0 +1,107 @@
+# Contributions
+
+Added a function in `hardware.py` that tracks CPU temperatures live in Celsius; this covers issue #1008.
+
+### Added code
+
+``` python
+def get_cpu_temperature(self) -> float:
+    """
+    Get average CPU temperature in Celsius.
+    Supported on Linux (Intel + AMD) and Windows Intel via Power Gadget.
+    Returns 0.0 if temperature cannot be read on the current platform.
+    """
+    try:
+        if self._mode == "intel_power_gadget":
+            all_cpu_details = self._intel_interface.get_cpu_details()
+            for metric, value in all_cpu_details.items():
+                if re.match(r"^CPU Temperature", metric):
+                    return float(value)
+            return 0.0
+
+        elif self._mode in ["intel_rapl", MODE_CPU_LOAD, "constant"]:
+            temps = psutil.sensors_temperatures()
+            if not temps:
+                logger.debug(
+                    "get_cpu_temperature: psutil.sensors_temperatures() "
+                    "returned no data on this platform"
+                )
+                return 0.0
+            for key in ["coretemp", "k10temp", "cpu_thermal"]:
+                if key in temps:
+                    readings = temps[key]
+                    avg = sum(r.current for r in readings) / len(readings)
+                    logger.debug(f"get_cpu_temperature: {key} avg = {avg:.1f}°C")
+                    return avg
+            return 0.0
+
+    except Exception as e:
+        logger.debug(f"get_cpu_temperature: Could not read CPU temperature: {e}")
+        return 0.0
+```
+### Added Workflow
+
+```yaml
+name: Test Temperature Tracking
+
+on:
+  push:
+    branches: [ main ]
+  workflow_dispatch:
+
+jobs:
+  test-temperature:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          pip install -e .
+          pip install pandas
+
+      - name: Check sensors available
+        run: |
+          sudo apt-get install -y lm-sensors
+          python3 -c "import psutil; print('Sensors:', psutil.sensors_temperatures())"
+
+      - name: Run temperature test
+        run: |
+          python3 -c "
+          import time
+          from codecarbon import EmissionsTracker
+
+          tracker = EmissionsTracker(
+              project_name='temperature_test',
+              measure_power_secs=15,
+              save_to_file=True,
+              output_file='emissions_temp_test.csv',
+              log_level='debug'
+          )
+
+          tracker.start()
+          total = sum(range(10_000_000))
+          time.sleep(30)
+          emissions = tracker.stop()
+
+          print(f'Emissions: {emissions:.6f} kg CO2')
+          print(f'CPU temperature: {tracker.final_emissions_data.cpu_temperature:.1f}C')
+          print(f'GPU temperature: {tracker.final_emissions_data.gpu_temperature:.1f}C')
+
+          import pandas as pd
+          df = pd.read_csv('emissions_temp_test.csv')
+          print('CSV columns:', df.columns.tolist())
+          print('Temperature values:')
+          print(df[['cpu_temperature', 'gpu_temperature']])
+          "
+```
+
+This allows CodeCarbon to track the temperature and write it into the CSV data set, as shown in the terminal below.
+![](../images/CpuTemp.png){.align-center width="700px" height="400px"}
+
diff --git a/docs/images/CpuTemp.png b/docs/images/CpuTemp.png
new file mode 100644
index 000000000..463dc24f7
Binary files /dev/null and b/docs/images/CpuTemp.png differ
diff --git a/requirements/requirements-api.txt b/requirements/requirements-api.txt
index ce02b80ac..545532048 100644
--- a/requirements/requirements-api.txt
+++ b/requirements/requirements-api.txt
@@ -32,6 +32,10 @@ click==8.3.1
     #   rich-toolkit
     #   typer
     #   uvicorn
+colorama==0.4.6
+    # via
+    #   click
+    #   uvicorn
 cryptography==46.0.7
     # via
     #   authlib
@@ -65,6 +69,8 @@
 fastar==0.9.0
     # via fastapi-cloud-cli
 fief-client==0.20.0
     # via carbonserver (carbonserver/pyproject.toml)
+greenlet==3.3.2
+    # via sqlalchemy
 h11==0.16.0
     # via
     #   httpcore
@@ -208,8 +214,6 @@ uvicorn==0.38.0
     #   fastapi
     #   fastapi-cli
     #   fastapi-cloud-cli
-uvloop==0.22.1
-    # via uvicorn
 watchfiles==1.1.1
     # via uvicorn
 websockets==15.0.1
diff --git a/tests/test_cpu.py b/tests/test_cpu.py
index 1e1308812..c18bcae2a 100644
--- a/tests/test_cpu.py
+++ b/tests/test_cpu.py
@@ -5,6 +5,7 @@
 import unittest
 from unittest import mock
 
+import psutil
 import pytest
 
 from codecarbon.core.config import normalize_gpu_ids
@@ -66,6 +67,12 @@ def test_is_psutil_not_available_on_exception(self, mock_cpu_times):
         self.assertFalse(is_psutil_available())
 
+    @mock.patch("psutil.sensors_temperatures")
+    def test_psutil_returns_expected_temperature(self, mock_sensors):
+        # Configure the patched function and assert through the patched call.
+        mock_sensors.return_value = {"coretemp": [mock.Mock(current=50.0)]}
+        temps = psutil.sensors_temperatures()
+        self.assertEqual(temps["coretemp"][0].current, 50.0)
+
 
 class TestRAPLHelperFunctions(unittest.TestCase):
     def test_get_candidate_bases_for_custom_dir(self):
diff --git a/tests/test_data/emissions_valid_headers.csv b/tests/test_data/emissions_valid_headers.csv
index b7493c902..aaf9ad182 100644
--- a/tests/test_data/emissions_valid_headers.csv
+++ b/tests/test_data/emissions_valid_headers.csv
@@ -1,2 +1,2 @@
-timestamp,project_name,run_id,experiment_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,water_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,cpu_utilization_percent,gpu_utilization_percent,ram_utilization_percent,ram_used_gb,on_cloud,pue,wue
-2021-09-23T15:04:51,codecarbon,0a578547-1d6b-4e2f-be0c-7ad10f2f7c97,test,161.20380687713623,0.0004490989249167,0.0027859076880178,0.269999999999999,0.0,12.884901888000002,0.0,0,0.00057442898176,0.00057442898176,0.1,Morocco,MAR,casablanca-settat,,,macOS-10.15.7-x86_64-i386-64bit,3.8.0,2.1.3,12,Intel(R) Core(TM) i7-8850H CPU @ 2.60GHz,,,-7.9084,33.5932,,machine,0.0,0.0,0.0,0.0,N,1.0,0.0
+timestamp,project_name,run_id,experiment_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,water_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,cpu_utilization_percent,gpu_utilization_percent,ram_utilization_percent,ram_used_gb,on_cloud,pue,wue,cpu_temperature,gpu_temperature
+2021-09-23T15:04:51,codecarbon,0a578547-1d6b-4e2f-be0c-7ad10f2f7c97,test,161.20380687713623,0.0004490989249167,0.0027859076880178,0.269999999999999,0.0,12.884901888000002,0.0,0,0.00057442898176,0.00057442898176,0.1,Morocco,MAR,casablanca-settat,,,macOS-10.15.7-x86_64-i386-64bit,3.8.0,2.1.3,12,Intel(R) Core(TM) i7-8850H CPU @ 2.60GHz,,,-7.9084,33.5932,,machine,0.0,0.0,0.0,0.0,N,1.0,0.0,0.0,0.0