diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml
index 63a82e7a8bf8..bbc82253f76b 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml
@@ -77,6 +77,12 @@ properties:
       and devices related to the ADSP.
     unevaluatedProperties: false
 
+  cooling:
+    $ref: /schemas/thermal/qcom,qmi-cooling.yaml#
+    description:
+      Cooling subnode representing the cooling devices exposed by the remote processor.
+    unevaluatedProperties: false
+
 required:
   - clocks
   - clock-names
diff --git a/Documentation/devicetree/bindings/thermal/qcom,qmi-cooling.yaml b/Documentation/devicetree/bindings/thermal/qcom,qmi-cooling.yaml
new file mode 100644
index 000000000000..90b46712d241
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom,qmi-cooling.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (c) 2023, Linaro Limited
+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/qcom,qmi-cooling.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QMI-based Thermal Mitigation Device (TMD) cooling devices
+
+maintainers:
+  - Caleb Connolly
+  - Gaurav Kohli
+
+description:
+  Qualcomm QMI-based TMD cooling devices are used to mitigate thermal conditions
+  across multiple remote subsystems. These devices operate based on junction
+  temperature sensors (TSENS) associated with the thermal zones of each subsystem.
+
+  Each subnode corresponds to a control interface for a single instance of the
+  TMD service running on a remote subsystem.
+
+definitions:
+  tmd:
+    type: object
+    description: |
+      A single Thermal Mitigation Device exposed by a remote subsystem.
+    properties:
+      label:
+        description: Name of the TMD control as exported by the remote firmware.
+
+      "#cooling-cells":
+        $ref: /schemas/thermal/thermal-cooling-devices.yaml#/properties/#cooling-cells
+
+      phandle: true
+
+    required:
+      - label
+      - "#cooling-cells"
+
+    additionalProperties: false
+
+properties:
+  compatible:
+    enum:
+      - qcom,qmi-cooling-modem
+      - qcom,qmi-cooling-cdsp
+
+  vdd:
+    $ref: "#/definitions/tmd"
+    description:
+      Modem processor temperature TMD
+    properties:
+      label:
+        const: modem
+
+required:
+  - compatible
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: qcom,qmi-cooling-cdsp
+    then:
+      properties:
+        cdsp_sw:
+          $ref: "#/definitions/tmd"
+          description:
+            CDSP software TMD
+          properties:
+            label:
+              const: cdsp_sw
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    remoteproc-cdsp {
+        cooling {
+            compatible = "qcom,qmi-cooling-cdsp";
+
+            cdsp_sw0: cdsp_sw {
+                label = "cdsp_sw";
+                #cooling-cells = <2>;
+            };
+        };
+    };
+
+    remoteproc-mpss {
+        cooling {
+            compatible = "qcom,qmi-cooling-modem";
+
+            modem_vdd: vdd {
+                label = "modem";
+                #cooling-cells = <2>;
+            };
+        };
+    };
+...
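A TMD control is consumed like any other cooling device: a thermal zone refers to it from a
cooling-maps entry using the two cooling cells. The fragment below is a minimal sketch, not
part of this patch; the zone name, sensor phandle and trip values are hypothetical, and only
the cdsp_sw0 label and #cooling-cells = <2> come from the binding above (THERMAL_NO_LIMIT is
provided by dt-bindings/thermal/thermal.h).

    #include <dt-bindings/thermal/thermal.h>

    thermal-zones {
        cdsp-thermal {
            polling-delay-passive = <100>;
            thermal-sensors = <&tsens0 5>;    /* hypothetical sensor */

            trips {
                cdsp_alert: cdsp-alert {
                    temperature = <95000>;
                    hysteresis = <2000>;
                    type = "passive";
                };
            };

            cooling-maps {
                map0 {
                    trip = <&cdsp_alert>;
                    /* two cells: minimum and maximum mitigation level */
                    cooling-device = <&cdsp_sw0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
                };
            };
        };
    };
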
diff --git a/MAINTAINERS b/MAINTAINERS index dc731d37c8fe..ce7b903d872f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -25924,6 +25924,14 @@ F: drivers/thermal/cpufreq_cooling.c F: drivers/thermal/cpuidle_cooling.c F: include/linux/cpu_cooling.h +THERMAL/REMOTEPROC_COOLING +M: Gaurav Kohli +L: linux-pm@vger.kernel.org +S: Supported +F: drivers/thermal/remoteproc_cooling.c +F: include/linux/remoteproc_cooling.h + + THERMAL/POWER_ALLOCATOR M: Lukasz Luba L: linux-pm@vger.kernel.org diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 2caadbbcf830..905a24b42fe6 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -124,6 +124,19 @@ config QCOM_PMIC_GLINK Say yes here to support USB-C and battery status on modern Qualcomm platforms. +config QCOM_QMI_COOLING + tristate "Qualcomm QMI cooling drivers" + depends on QCOM_RPROC_COMMON + depends on ARCH_QCOM || COMPILE_TEST + select QCOM_QMI_HELPERS + help + This enables the remote subsystem cooling devices. These cooling + devices will be used by Qualcomm chipset to place various remote + subsystem mitigations like remote processor passive mitigation, + remote subsystem voltage restriction at low temperatures etc. + The QMI cooling device will interface with remote subsystem + using Qualcomm remoteproc interface. + config QCOM_QMI_HELPERS tristate depends on NET diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index b7f1d2a57367..b6728f54944b 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o obj-$(CONFIG_QCOM_PMIC_PDCHARGER_ULOG) += pmic_pdcharger_ulog.o CFLAGS_pmic_pdcharger_ulog.o := -I$(src) +obj-$(CONFIG_QCOM_QMI_COOLING) += qmi-cooling.o obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o qmi_helpers-y += qmi_encdec.o qmi_interface.o obj-$(CONFIG_QCOM_RAMP_CTRL) += ramp_controller.o diff --git a/drivers/soc/qcom/qmi-cooling.c b/drivers/soc/qcom/qmi-cooling.c new file mode 100644 index 000000000000..1a6afcb96bf6 --- /dev/null +++ b/drivers/soc/qcom/qmi-cooling.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017, The Linux Foundation + * Copyright (c) 2025, Linaro Limited + * + * QMI Thermal Mitigation Device (TMD) client driver. + * This driver provides an in-kernel client to handle hot and cold thermal + * mitigations for remote subsystems (modem and DSPs) running the TMD service. + * It doesn't implement any handling of reports from remote subsystems. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qmi-cooling.h" + +#define MODEM0_INSTANCE_ID 0x0 +#define ADSP_INSTANCE_ID 0x1 +#define CDSP_INSTANCE_ID 0x43 +#define SLPI_INSTANCE_ID 0x53 + +#define QMI_TMD_RESP_TIMEOUT msecs_to_jiffies(100) + +/** + * struct qmi_tmd_client - TMD client state + * @dev: Device associated with this client + * @name: Friendly name for the remote TMD service + * @handle: QMI connection handle + * @mutex: Lock to synchronise QMI communication + * @id: The QMI TMD service instance ID + * @cdev_list: The list of cooling devices (controls) enabled for this instance + * @svc_arrive_work: Work item for initialising the client when the TMD service + * starts. 
+ * @connection_active: Whether or not we're connected to the QMI TMD service + */ +struct qmi_tmd_client { + struct device *dev; + const char *name; + struct qmi_handle handle; + struct mutex mutex; + u32 id; + struct list_head cdev_list; + struct work_struct svc_arrive_work; + bool connection_active; +}; + +/** + * struct qmi_tmd - A TMD cooling device + * @np: OF node associated with this control + * @type: The control type (exposed via sysfs) + * @qmi_name: The common name of this control shared by the remote subsystem + * @rproc_cdev: Remote processor cooling device handle + * @cur_state: The current cooling/warming/mitigation state + * @max_state: The maximum state + * @client: The TMD client instance this control is associated with + */ +struct qmi_tmd { + struct device_node *np; + const char *type; + char qmi_name[QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1]; + struct list_head node; + struct remoteproc_cdev *rproc_cdev; + unsigned int cur_state; + unsigned int max_state; + struct qmi_tmd_client *client; +}; + +/** + * struct qmi_instance_id - QMI instance match data + * @id: The QMI instance ID + * @name: Friendly name for this instance + */ +struct qmi_instance_data { + u32 id; + const char *name; +}; + +/* Notify the remote subsystem of the requested cooling state */ +static int qmi_tmd_send_state_request(struct qmi_tmd *tmd) +{ + struct tmd_set_mitigation_level_resp_msg_v01 tmd_resp = { 0 }; + struct tmd_set_mitigation_level_req_msg_v01 req = { 0 }; + struct qmi_tmd_client *client; + struct qmi_txn txn; + int ret = 0; + + client = tmd->client; + + guard(mutex)(&client->mutex); + + /* + * This function is called by qmi_set_cur_state() which does not know if + * the QMI service is actually online. If it isn't then we noop here. + * The state is cached in tmd->cur_state and will be broadcast via + * qmi_tmd_init_control() when the service comes up. 
+ */ + if (!client->connection_active) + return 0; + + strscpy(req.mitigation_dev_id.mitigation_dev_id, tmd->qmi_name, + QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1); + req.mitigation_level = tmd->cur_state; + + ret = qmi_txn_init(&client->handle, &txn, + tmd_set_mitigation_level_resp_msg_v01_ei, &tmd_resp); + if (ret < 0) { + dev_err(client->dev, "qmi set state %d txn init failed for %s ret %d\n", + tmd->cur_state, tmd->type, ret); + return ret; + } + + ret = qmi_send_request(&client->handle, NULL, &txn, + QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01, + TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN, + tmd_set_mitigation_level_req_msg_v01_ei, &req); + if (ret < 0) { + dev_err(client->dev, "qmi set state %d txn send failed for %s ret %d\n", + tmd->cur_state, tmd->type, ret); + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, QMI_TMD_RESP_TIMEOUT); + if (ret < 0) { + dev_err(client->dev, "qmi set state %d txn wait failed for %s ret %d\n", + tmd->cur_state, tmd->type, ret); + return ret; + } + + if (tmd_resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ret = -tmd_resp.resp.result; + dev_err(client->dev, "qmi set state %d NOT success for %s ret %d\n", + tmd->cur_state, tmd->type, ret); + return ret; + } + + dev_dbg(client->dev, "Requested state %d/%d for %s\n", tmd->cur_state, + tmd->max_state, tmd->type); + + return 0; +} + +static int qmi_get_max_level(void *devdata, unsigned long *level) +{ + struct qmi_tmd *tmd = devdata; + + if (!tmd) + return -EINVAL; + + *level = tmd->max_state; + + return 0; +} + +static int qmi_get_cur_level(void *devdata, unsigned long *level) +{ + struct qmi_tmd *tmd = devdata; + + if (!tmd) + return -EINVAL; + + *level = tmd->cur_state; + + return 0; +} + +static int qmi_set_cur_level(void *devdata, unsigned long level) +{ + struct qmi_tmd *tmd = devdata; + + if (!tmd) + return -EINVAL; + + if (level > tmd->max_state) + return -EINVAL; + + if (tmd->cur_state == level) + return 0; + + tmd->cur_state = level; + + return qmi_tmd_send_state_request(tmd); +} + +static const struct remoteproc_cooling_ops qmi_rproc_ops = { + .get_max_level = qmi_get_max_level, + .get_cur_level = qmi_get_cur_level, + .set_cur_level = qmi_set_cur_level, +}; + +static int qmi_register_cooling_device(struct qmi_tmd *tmd) +{ + struct remoteproc_cdev *rproc_cdev; + + rproc_cdev = remoteproc_cooling_register(tmd->np, + tmd->type, + &qmi_rproc_ops, + tmd); + + if (IS_ERR(rproc_cdev)) + return dev_err_probe(tmd->client->dev, PTR_ERR(rproc_cdev), + "Failed to register cooling device %s\n", + tmd->qmi_name); + + tmd->rproc_cdev = rproc_cdev; + return 0; +} + +/* + * Init a single TMD control by registering a cooling device for it, or + * synchronising state with the remote subsystem if recovering from a service + * restart. This is called when the TMD service starts up. + */ +static int qmi_tmd_init_control(struct qmi_tmd_client *client, const char *label, + u8 max_state) +{ + struct qmi_tmd *tmd = NULL; + + list_for_each_entry(tmd, &client->cdev_list, node) + if (!strncasecmp(tmd->qmi_name, label, + QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1)) + goto found; + + dev_dbg(client->dev, + "TMD '%s' available in firmware but not specified in DT\n", + label); + return 0; + +found: + tmd->max_state = max_state; + /* + * If the cooling device already exists then the QMI service went away and + * came back. So just make sure the current cooling device state is + * reflected on the remote side and then return. 
+ */ + if (tmd->rproc_cdev) + return qmi_tmd_send_state_request(tmd); + + return qmi_register_cooling_device(tmd); +} + +/* + * When the QMI service starts up on a remote subsystem this function will fetch + * the list of TMDs on the subsystem, match it to the TMDs specified in devicetree + * and call qmi_tmd_init_control() for each + */ +static void qmi_tmd_svc_arrive(struct work_struct *work) +{ + struct qmi_tmd_client *client = + container_of(work, struct qmi_tmd_client, svc_arrive_work); + + struct tmd_get_mitigation_device_list_req_msg_v01 req = { 0 }; + struct tmd_get_mitigation_device_list_resp_msg_v01 *resp __free(kfree); + int ret = 0, i; + struct qmi_txn txn; + + /* resp struct is 1.1kB, allocate it on the heap. */ + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return; + + /* Get a list of TMDs supported by the remoteproc */ + scoped_guard(mutex, &client->mutex) { + ret = qmi_txn_init(&client->handle, &txn, + tmd_get_mitigation_device_list_resp_msg_v01_ei, resp); + if (ret < 0) { + dev_err(client->dev, + "Transaction init error for instance_id: %#x ret %d\n", + client->id, ret); + return; + } + + ret = qmi_send_request(&client->handle, NULL, &txn, + QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01, + TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN, + tmd_get_mitigation_device_list_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return; + } + + ret = qmi_txn_wait(&txn, QMI_TMD_RESP_TIMEOUT); + if (ret < 0) { + dev_err(client->dev, "Transaction wait error for client %#x ret:%d\n", + client->id, ret); + return; + } + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + ret = resp->resp.result; + dev_err(client->dev, "Failed to get device list for client %#x ret:%d\n", + client->id, ret); + return; + } + + client->connection_active = true; + } + + for (i = 0; i < resp->mitigation_device_list_len; i++) { + struct tmd_mitigation_dev_list_type_v01 *device = + &resp->mitigation_device_list[i]; + + ret = qmi_tmd_init_control(client, + device->mitigation_dev_id.mitigation_dev_id, + device->max_mitigation_level); + if (ret) + break; + } +} + +static void thermal_qmi_net_reset(struct qmi_handle *qmi) +{ + struct qmi_tmd_client *client = container_of(qmi, struct qmi_tmd_client, handle); + struct qmi_tmd *tmd = NULL; + + list_for_each_entry(tmd, &client->cdev_list, node) { + qmi_tmd_send_state_request(tmd); + } +} + +static void thermal_qmi_del_server(struct qmi_handle *qmi, struct qmi_service *service) +{ + struct qmi_tmd_client *client = container_of(qmi, struct qmi_tmd_client, handle); + + scoped_guard(mutex, &client->mutex) + client->connection_active = false; +} + +static int thermal_qmi_new_server(struct qmi_handle *qmi, struct qmi_service *service) +{ + struct qmi_tmd_client *client = container_of(qmi, struct qmi_tmd_client, handle); + struct sockaddr_qrtr sq = { AF_QIPCRTR, service->node, service->port }; + + scoped_guard(mutex, &client->mutex) + kernel_connect(qmi->sock, (struct sockaddr_unsized *)&sq, sizeof(sq), 0); + + queue_work(system_highpri_wq, &client->svc_arrive_work); + + return 0; +} + +static struct qmi_ops thermal_qmi_event_ops = { + .new_server = thermal_qmi_new_server, + .del_server = thermal_qmi_del_server, + .net_reset = thermal_qmi_net_reset, +}; + +static void qmi_tmd_cleanup(struct qmi_tmd_client *client) +{ + struct qmi_tmd *tmd, *c_next; + + guard(mutex)(&client->mutex); + + client->connection_active = false; + + qmi_handle_release(&client->handle); + cancel_work(&client->svc_arrive_work); + list_for_each_entry_safe(tmd, c_next, 
&client->cdev_list, node) { + if (tmd->rproc_cdev) + remoteproc_cooling_unregister(tmd->rproc_cdev); + + list_del(&tmd->node); + } +} + +/* Parse the controls and allocate a qmi_tmd for each of them */ +static int qmi_tmd_alloc_cdevs(struct qmi_tmd_client *client) +{ + struct device *dev = client->dev; + struct qmi_tmd *tmd; + struct device_node *subnode, *node = dev->of_node; + int ret; + + for_each_available_child_of_node(node, subnode) { + const char *name; + + tmd = devm_kzalloc(dev, sizeof(*tmd), GFP_KERNEL); + if (!tmd) + return dev_err_probe(client->dev, -ENOMEM, + "Couldn't allocate tmd\n"); + + tmd->type = devm_kasprintf(client->dev, GFP_KERNEL, "%s:%s", + client->name, subnode->name); + if (!tmd->type) + return dev_err_probe(dev, -ENOMEM, + "Couldn't allocate cooling device name\n"); + + if (of_property_read_string(subnode, "label", &name)) { + return dev_err_probe(client->dev, -EINVAL, + "Failed to parse dev name for %s\n", + subnode->name); + } + + ret = strscpy(tmd->qmi_name, name, + QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1); + if (ret == -E2BIG) { + return dev_err_probe(dev, -EINVAL, "TMD label %s is too long\n", + name); + } + + tmd->client = client; + tmd->np = subnode; + tmd->cur_state = 0; + list_add(&tmd->node, &client->cdev_list); + } + + if (list_empty(&client->cdev_list)) + return dev_err_probe(client->dev, -EINVAL, + "No cooling devices specified for client %s (%#x)\n", + client->name, client->id); + + return 0; +} + +static int qmi_tmd_client_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct qmi_tmd_client *client; + const struct qmi_instance_data *match; + int ret; + client = devm_kzalloc(dev, sizeof(*client), GFP_KERNEL); + if (!client) + return -ENOMEM; + + client->dev = dev; + + match = of_device_get_match_data(dev); + if (!match) + return dev_err_probe(dev, -EINVAL, "No match data\n"); + + client->id = match->id; + client->name = match->name; + + mutex_init(&client->mutex); + INIT_LIST_HEAD(&client->cdev_list); + INIT_WORK(&client->svc_arrive_work, qmi_tmd_svc_arrive); + + ret = qmi_tmd_alloc_cdevs(client); + if (ret) + return ret; + + platform_set_drvdata(pdev, client); + + ret = qmi_handle_init(&client->handle, + TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN, + &thermal_qmi_event_ops, NULL); + if (ret < 0) + return dev_err_probe(client->dev, ret, "QMI handle init failed for client %#x\n", + client->id); + + ret = qmi_add_lookup(&client->handle, TMD_SERVICE_ID_V01, TMD_SERVICE_VERS_V01, + client->id); + if (ret < 0) { + qmi_handle_release(&client->handle); + return dev_err_probe(client->dev, ret, "QMI register failed for client 0x%x\n", + client->id); + } + + return 0; +} + +static void qmi_tmd_client_remove(struct platform_device *pdev) +{ + struct qmi_tmd_client *client = platform_get_drvdata(pdev); + + qmi_tmd_cleanup(client); +} + +static const struct of_device_id qmi_tmd_device_table[] = { + { + .compatible = "qcom,qmi-cooling-cdsp", + .data = &((struct qmi_instance_data) { CDSP_INSTANCE_ID, "cdsp" }), + }, + {} +}; +MODULE_DEVICE_TABLE(of, qmi_tmd_device_table); + +static struct platform_driver qmi_tmd_device_driver = { + .probe = qmi_tmd_client_probe, + .remove = qmi_tmd_client_remove, + .driver = { + .name = "qcom-qmi-cooling", + .of_match_table = qmi_tmd_device_table, + }, +}; + +module_platform_driver(qmi_tmd_device_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Qualcomm QMI Thermal Mitigation Device driver"); diff --git a/drivers/soc/qcom/qmi-cooling.h b/drivers/soc/qcom/qmi-cooling.h new file 
mode 100644 index 000000000000..f46b827b4ce6 --- /dev/null +++ b/drivers/soc/qcom/qmi-cooling.h @@ -0,0 +1,428 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017, The Linux Foundation + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef __QCOM_COOLING_H__ +#define __QCOM_COOLING_H__ + +#include + +#define TMD_SERVICE_ID_V01 0x18 +#define TMD_SERVICE_VERS_V01 0x01 + +#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01 0x0020 +#define QMI_TMD_GET_MITIGATION_LEVEL_REQ_V01 0x0022 +#define QMI_TMD_GET_SUPPORTED_MSGS_REQ_V01 0x001E +#define QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01 0x0021 +#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0023 +#define QMI_TMD_GET_SUPPORTED_MSGS_RESP_V01 0x001E +#define QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01 0x0021 +#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0024 +#define QMI_TMD_MITIGATION_LEVEL_REPORT_IND_V01 0x0025 +#define QMI_TMD_GET_MITIGATION_LEVEL_RESP_V01 0x0022 +#define QMI_TMD_GET_SUPPORTED_FIELDS_REQ_V01 0x001F +#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01 0x0020 +#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0023 +#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0024 +#define QMI_TMD_GET_SUPPORTED_FIELDS_RESP_V01 0x001F + +#define QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 32 +#define QMI_TMD_MITIGATION_DEV_LIST_MAX_V01 32 + +struct tmd_mitigation_dev_id_type_v01 { + char mitigation_dev_id[QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1]; +}; + +static const struct qmi_elem_info tmd_mitigation_dev_id_type_v01_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct tmd_mitigation_dev_id_type_v01, + mitigation_dev_id), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct tmd_mitigation_dev_list_type_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id; + uint8_t max_mitigation_level; +}; + +static const struct qmi_elem_info tmd_mitigation_dev_list_type_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct tmd_mitigation_dev_list_type_v01, + mitigation_dev_id), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct tmd_mitigation_dev_list_type_v01, + max_mitigation_level), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct tmd_get_mitigation_device_list_req_msg_v01 { + char placeholder; +}; + +#define TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN 0 +const struct qmi_elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct tmd_get_mitigation_device_list_resp_msg_v01 { + struct qmi_response_type_v01 resp; + uint8_t mitigation_device_list_valid; + uint32_t mitigation_device_list_len; + struct tmd_mitigation_dev_list_type_v01 + mitigation_device_list[QMI_TMD_MITIGATION_DEV_LIST_MAX_V01]; +}; + +#define TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN 1099 +static const struct qmi_elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[] = { + { + .data_type = 
QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct tmd_get_mitigation_device_list_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct tmd_get_mitigation_device_list_resp_msg_v01, + mitigation_device_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct tmd_get_mitigation_device_list_resp_msg_v01, + mitigation_device_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_TMD_MITIGATION_DEV_LIST_MAX_V01, + .elem_size = sizeof(struct tmd_mitigation_dev_list_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct tmd_get_mitigation_device_list_resp_msg_v01, + mitigation_device_list), + .ei_array = tmd_mitigation_dev_list_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct tmd_set_mitigation_level_req_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id; + uint8_t mitigation_level; +}; + +#define TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 40 +static const struct qmi_elem_info tmd_set_mitigation_level_req_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct tmd_set_mitigation_level_req_msg_v01, + mitigation_dev_id), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct tmd_set_mitigation_level_req_msg_v01, + mitigation_level), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct tmd_set_mitigation_level_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7 +static const struct qmi_elem_info tmd_set_mitigation_level_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct tmd_set_mitigation_level_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct tmd_get_mitigation_level_req_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_device; +}; +#define TMD_GET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36 + +static const struct qmi_elem_info tmd_get_mitigation_level_req_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct tmd_get_mitigation_level_req_msg_v01, + mitigation_device), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct tmd_get_mitigation_level_resp_msg_v01 { + struct qmi_response_type_v01 resp; + uint8_t current_mitigation_level_valid; + uint8_t current_mitigation_level; + uint8_t requested_mitigation_level_valid; 
+ uint8_t requested_mitigation_level; +}; + +#define TMD_GET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 15 +static const struct qmi_elem_info tmd_get_mitigation_level_resp_msg_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct tmd_get_mitigation_level_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct tmd_get_mitigation_level_resp_msg_v01, + current_mitigation_level_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct tmd_get_mitigation_level_resp_msg_v01, + current_mitigation_level), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct tmd_get_mitigation_level_resp_msg_v01, + requested_mitigation_level_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct tmd_get_mitigation_level_resp_msg_v01, + requested_mitigation_level), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct tmd_register_notification_mitigation_level_req_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_device; +}; + +#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36 +static const struct qmi_elem_info + tmd_register_notification_mitigation_level_req_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct tmd_register_notification_mitigation_level_req_msg_v01, + mitigation_device), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, + }; + +struct tmd_register_notification_mitigation_level_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7 +static const struct qmi_elem_info + tmd_register_notification_mitigation_level_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct tmd_register_notification_mitigation_level_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, + }; + +struct tmd_deregister_notification_mitigation_level_req_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_device; +}; + +#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36 +static const struct qmi_elem_info + tmd_deregister_notification_mitigation_level_req_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct tmd_deregister_notification_mitigation_level_req_msg_v01, + mitigation_device), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + 
.data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, + }; + +struct tmd_deregister_notification_mitigation_level_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7 +static const struct qmi_elem_info + tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct tmd_deregister_notification_mitigation_level_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, + }; + +struct tmd_mitigation_level_report_ind_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_device; + uint8_t current_mitigation_level; +}; + +#define TMD_MITIGATION_LEVEL_REPORT_IND_MSG_V01_MAX_MSG_LEN 40 +static const struct qmi_elem_info tmd_mitigation_level_report_ind_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct tmd_mitigation_level_report_ind_msg_v01, + mitigation_device), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct tmd_mitigation_level_report_ind_msg_v01, + current_mitigation_level), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +#endif /* __QMI_COOLING_INTERNAL_H__ */ diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index b10080d61860..31e92be34387 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -229,6 +229,17 @@ config PCIE_THERMAL If you want this support, you should say Y here. + +config REMOTEPROC_THERMAL + bool "Remote processor cooling support" + help + This implements a generic cooling mechanism for remote processors + (modem, DSP, etc.) that allows vendor-specific implementations to + register thermal cooling devices and provide callbacks for thermal + mitigation. + + If you want this support, you should say Y here. + config THERMAL_EMULATION bool "Thermal emulation mode support" help diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index bb21e7ea7fc6..ae747dde54fe 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -34,6 +34,8 @@ thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o thermal_sys-$(CONFIG_PCIE_THERMAL) += pcie_cooling.o +thermal_sys-$(CONFIG_REMOTEPROC_THERMAL) += remoteproc_cooling.o + obj-$(CONFIG_K3_THERMAL) += k3_bandgap.o k3_j72xx_bandgap.o # platform thermal drivers obj-y += broadcom/ diff --git a/drivers/thermal/remoteproc_cooling.c b/drivers/thermal/remoteproc_cooling.c new file mode 100644 index 000000000000..a1f948cbde0f --- /dev/null +++ b/drivers/thermal/remoteproc_cooling.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Remote Processor Cooling Device + * + * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define REMOTEPROC_PREFIX "rproc_" + +struct remoteproc_cooling_ops { + int (*get_max_level)(void *devdata, unsigned long *level); + int (*get_cur_level)(void *devdata, unsigned long *level); + int (*set_cur_level)(void *devdata, unsigned long level); +}; + +/** + * struct remoteproc_cdev - Remote processor cooling device + * @cdev: Thermal cooling device handle + * @ops: Vendor-specific operation callbacks + * @devdata: Private data for vendor implementation + * @np: Device tree node associated with this cooling device + * @lock: Mutex to protect cooling device operations + */ +struct remoteproc_cdev { + struct thermal_cooling_device *cdev; + const struct remoteproc_cooling_ops *ops; + void *devdata; + struct device_node *np; + struct mutex lock; +}; + + +/* Thermal cooling device callbacks */ + +static int remoteproc_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct remoteproc_cdev *rproc_cdev = cdev->devdata; + int ret; + + if (!rproc_cdev || !rproc_cdev->ops) + return -EINVAL; + + mutex_lock(&rproc_cdev->lock); + ret = rproc_cdev->ops->get_max_level(rproc_cdev->devdata, state); + mutex_unlock(&rproc_cdev->lock); + + return ret; +} + +static int remoteproc_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct remoteproc_cdev *rproc_cdev = cdev->devdata; + int ret; + + if (!rproc_cdev || !rproc_cdev->ops) + return -EINVAL; + + mutex_lock(&rproc_cdev->lock); + ret = rproc_cdev->ops->get_cur_level(rproc_cdev->devdata, state); + mutex_unlock(&rproc_cdev->lock); + + return ret; +} + +static int remoteproc_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + struct remoteproc_cdev *rproc_cdev = cdev->devdata; + int ret; + + if (!rproc_cdev || !rproc_cdev->ops) + return -EINVAL; + + mutex_lock(&rproc_cdev->lock); + ret = rproc_cdev->ops->set_cur_level(rproc_cdev->devdata, state); + mutex_unlock(&rproc_cdev->lock); + + return ret; +} + +static const struct thermal_cooling_device_ops remoteproc_cooling_ops = { + .get_max_state = remoteproc_get_max_state, + .get_cur_state = remoteproc_get_cur_state, + .set_cur_state = remoteproc_set_cur_state, +}; + +struct remoteproc_cdev * +remoteproc_cooling_register(struct device_node *np, + const char *name, const struct remoteproc_cooling_ops *ops, + void *devdata) +{ + struct remoteproc_cdev *rproc_cdev; + struct thermal_cooling_device *cdev; + int ret; + + if (!name || !ops) { + return ERR_PTR(-EINVAL); + } + + rproc_cdev = kzalloc(sizeof(*rproc_cdev), GFP_KERNEL); + if (!rproc_cdev) + return ERR_PTR(-ENOMEM); + + rproc_cdev->ops = ops; + rproc_cdev->devdata = devdata; + rproc_cdev->np = np; + mutex_init(&rproc_cdev->lock); + + char *rproc_name __free(kfree) = + kasprintf(GFP_KERNEL, REMOTEPROC_PREFIX "%s", name); + /* Register with thermal framework */ + if (np) { + cdev = thermal_of_cooling_device_register(np, rproc_name, rproc_cdev, + &remoteproc_cooling_ops); + } + + if (IS_ERR(cdev)) { + ret = PTR_ERR(cdev); + goto free_rproc_cdev; + } + + rproc_cdev->cdev = cdev; + + return rproc_cdev; + +free_rproc_cdev: + kfree(rproc_cdev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(remoteproc_cooling_register); + +void remoteproc_cooling_unregister(struct remoteproc_cdev *rproc_cdev) +{ + if (!rproc_cdev) + return; + + thermal_cooling_device_unregister(rproc_cdev->cdev); + mutex_destroy(&rproc_cdev->lock); + kfree(rproc_cdev); +} +EXPORT_SYMBOL_GPL(remoteproc_cooling_unregister); + 
+MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Remote Processor Cooling Device"); diff --git a/include/linux/remoteproc_cooling.h b/include/linux/remoteproc_cooling.h new file mode 100644 index 000000000000..ef94019d220d --- /dev/null +++ b/include/linux/remoteproc_cooling.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Remote Processor Cooling Device + * + * Copyright (c) 2025, Qualcomm Innovation Center + */ + +#ifndef __REMOTEPROC_COOLING_H__ +#define __REMOTEPROC_COOLING_H__ + +#include + +struct device; +struct device_node; + +struct remoteproc_cooling_ops { + int (*get_max_level)(void *devdata, unsigned long *level); + int (*get_cur_level)(void *devdata, unsigned long *level); + int (*set_cur_level)(void *devdata, unsigned long level); +}; + +struct remoteproc_cdev; + +#ifdef CONFIG_REMOTEPROC_THERMAL + +struct remoteproc_cdev * +remoteproc_cooling_register(struct device_node *np, + const char *name, + const struct remoteproc_cooling_ops *ops, + void *devdata); + +void remoteproc_cooling_unregister(struct remoteproc_cdev *rproc_cdev); + +#else /* !CONFIG_REMOTEPROC_THERMAL */ + +static inline struct remoteproc_cdev * +remoteproc_cooling_register(struct device_node *np, + const char *name, + const struct remoteproc_cooling_ops *ops, + void *devdata) +{ + return ERR_PTR(-EINVAL); +} + +static inline void +remoteproc_cooling_unregister(struct remoteproc_cdev *rproc_cdev) +{ +} + +#endif /* CONFIG_REMOTEPROC_THERMAL */ + +#endif /* __REMOTEPROC_COOLING_H__ */