|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | +/* |
| 3 | + * Copyright (C) 2025 Intel Corporation |
| 4 | + */ |
| 5 | + |
| 6 | +#include <linux/component.h> |
| 7 | +#include <linux/mei_cl_bus.h> |
| 8 | +#include <linux/module.h> |
| 9 | +#include <linux/overflow.h> |
| 10 | +#include <linux/pci.h> |
| 11 | +#include <linux/slab.h> |
| 12 | +#include <linux/uuid.h> |
| 13 | + |
| 14 | +#include <drm/intel/i915_component.h> |
| 15 | +#include <drm/intel/intel_lb_mei_interface.h> |
| 16 | + |
| 17 | +#include "mkhi.h" |
| 18 | + |
| 19 | +/** |
| 20 | + * DOC: Late Binding Firmware Update/Upload |
| 21 | + * |
| 22 | + * Late Binding is a firmware update/upload mechanism that allows configuration |
| 23 | + * payloads to be securely delivered and applied at runtime, rather than |
| 24 | + * being embedded in the system firmware image (e.g., IFWI or SPI flash). |
| 25 | + * |
| 26 | + * This mechanism is used to update device-level configuration such as: |
| 27 | + * - Fan controller |
| 28 | + * - Voltage regulator (VR) |
| 29 | + * |
| 30 | + * Key Characteristics: |
| 31 | + * --------------------- |
| 32 | + * - Runtime Delivery: |
| 33 | + * Firmware blobs are loaded by the host driver (e.g., Xe KMD) |
| 34 | + * after the GPU or SoC has booted. |
| 35 | + * |
| 36 | + * - Secure and Authenticated: |
| 37 | + * All payloads are signed and verified by the authentication firmware. |
| 38 | + * |
| 39 | + * - No Firmware Flashing Required: |
| 40 | + * Updates are applied in volatile memory and do not require SPI flash |
| 41 | + * modification or system reboot. |
| 42 | + * |
| 43 | + * - Re-entrant: |
| 44 | + * Multiple updates of the same or different types can be applied |
| 45 | + * sequentially within a single boot session. |
| 46 | + * |
| 47 | + * - Version Controlled: |
| 48 | + * Each payload includes version and security version number (SVN) |
| 49 | + * metadata to support anti-rollback enforcement. |
| 50 | + * |
| 51 | + * Upload Flow: |
| 52 | + * ------------ |
| 53 | + * 1. Host driver (KMD or user-space tool) loads the late binding firmware. |
| 54 | + * 2. Firmware is passed to the MEI interface and forwarded to |
| 55 | + * authentication firmware. |
| 56 | + * 3. Authentication firmware authenticates the payload and extracts |
| 57 | + * command and data arrays. |
| 58 | + * 4. Authentication firmware delivers the configuration to PUnit/PCODE. |
 * 5. Status is returned to the host via MEI.
| 60 | + */ |
| 61 | + |
| 62 | +#define INTEL_LB_CMD 0x12 |
| 63 | +#define INTEL_LB_RSP (INTEL_LB_CMD | 0x80) |
| 64 | + |
| 65 | +#define INTEL_LB_SEND_TIMEOUT_MSEC 3000 |
| 66 | +#define INTEL_LB_RECV_TIMEOUT_MSEC 3000 |
| 67 | + |
/**
 * struct mei_lb_req - Late Binding request structure
 * @header: MKHI message header (see struct mkhi_msg_hdr)
 * @type: Type of the Late Binding payload
 * @flags: Flags to be passed to the authentication firmware (e.g. %INTEL_LB_FLAGS_IS_PERSISTENT)
 * @reserved: Reserved for future use by authentication firmware, must be set to 0
 * @payload_size: Size of the payload data in bytes
 * @payload: Payload data to be sent to the authentication firmware
 *
 * Wire format of the request sent over the MEI client bus: all multi-byte
 * fields are little-endian and the structure is packed, so there is no
 * implicit padding between the header and the payload.
 */
struct mei_lb_req {
	struct mkhi_msg_hdr header;
	__le32 type;
	__le32 flags;
	__le32 reserved[2];
	__le32 payload_size;
	u8 payload[] __counted_by(payload_size);
} __packed;
| 85 | + |
/**
 * struct mei_lb_rsp - Late Binding response structure
 * @header: MKHI message header (see struct mkhi_msg_hdr)
 * @type: Type of the Late Binding payload
 * @reserved: Reserved for future use by authentication firmware, must be set to 0
 * @status: Status returned by authentication firmware (see &enum intel_lb_status)
 *
 * Wire format of the firmware reply: little-endian fields, packed layout.
 * The firmware may send a truncated reply carrying only @header when the
 * request is rejected early (see mei_lb_check_response()).
 */
struct mei_lb_rsp {
	struct mkhi_msg_hdr header;
	__le32 type;
	__le32 reserved[2];
	__le32 status;
} __packed;
| 99 | + |
| 100 | +static bool mei_lb_check_response(const struct device *dev, ssize_t bytes, |
| 101 | + struct mei_lb_rsp *rsp) |
| 102 | +{ |
| 103 | + /* |
| 104 | + * Received message size may be smaller than the full message size when |
| 105 | + * reply contains only MKHI header with result field set to the error code. |
| 106 | + * Check the header size and content first to output exact error, if needed, |
| 107 | + * and then process to the whole message. |
| 108 | + */ |
| 109 | + if (bytes < sizeof(rsp->header)) { |
| 110 | + dev_err(dev, "Received less than header size from the firmware: %zd < %zu\n", |
| 111 | + bytes, sizeof(rsp->header)); |
| 112 | + return false; |
| 113 | + } |
| 114 | + if (rsp->header.group_id != MKHI_GROUP_ID_GFX) { |
| 115 | + dev_err(dev, "Mismatch group id: 0x%x instead of 0x%x\n", |
| 116 | + rsp->header.group_id, MKHI_GROUP_ID_GFX); |
| 117 | + return false; |
| 118 | + } |
| 119 | + if (rsp->header.command != INTEL_LB_RSP) { |
| 120 | + dev_err(dev, "Mismatch command: 0x%x instead of 0x%x\n", |
| 121 | + rsp->header.command, INTEL_LB_RSP); |
| 122 | + return false; |
| 123 | + } |
| 124 | + if (rsp->header.result) { |
| 125 | + dev_err(dev, "Error in result: 0x%x\n", rsp->header.result); |
| 126 | + return false; |
| 127 | + } |
| 128 | + if (bytes < sizeof(*rsp)) { |
| 129 | + dev_err(dev, "Received less than message size from the firmware: %zd < %zu\n", |
| 130 | + bytes, sizeof(*rsp)); |
| 131 | + return false; |
| 132 | + } |
| 133 | + |
| 134 | + return true; |
| 135 | +} |
| 136 | + |
| 137 | +static int mei_lb_push_payload(struct device *dev, |
| 138 | + enum intel_lb_type type, u32 flags, |
| 139 | + const void *payload, size_t payload_size) |
| 140 | +{ |
| 141 | + struct mei_cl_device *cldev; |
| 142 | + struct mei_lb_req *req = NULL; |
| 143 | + struct mei_lb_rsp rsp; |
| 144 | + size_t req_size; |
| 145 | + ssize_t bytes; |
| 146 | + int ret; |
| 147 | + |
| 148 | + cldev = to_mei_cl_device(dev); |
| 149 | + |
| 150 | + ret = mei_cldev_enable(cldev); |
| 151 | + if (ret) { |
| 152 | + dev_dbg(dev, "Failed to enable firmware client. %d\n", ret); |
| 153 | + return ret; |
| 154 | + } |
| 155 | + |
| 156 | + req_size = struct_size(req, payload, payload_size); |
| 157 | + if (req_size > mei_cldev_mtu(cldev)) { |
| 158 | + dev_err(dev, "Payload is too big: %zu\n", payload_size); |
| 159 | + ret = -EMSGSIZE; |
| 160 | + goto end; |
| 161 | + } |
| 162 | + |
| 163 | + req = kmalloc(req_size, GFP_KERNEL); |
| 164 | + if (!req) { |
| 165 | + ret = -ENOMEM; |
| 166 | + goto end; |
| 167 | + } |
| 168 | + |
| 169 | + req->header.group_id = MKHI_GROUP_ID_GFX; |
| 170 | + req->header.command = INTEL_LB_CMD; |
| 171 | + req->type = cpu_to_le32(type); |
| 172 | + req->flags = cpu_to_le32(flags); |
| 173 | + req->reserved[0] = 0; |
| 174 | + req->reserved[1] = 0; |
| 175 | + req->payload_size = cpu_to_le32(payload_size); |
| 176 | + memcpy(req->payload, payload, payload_size); |
| 177 | + |
| 178 | + bytes = mei_cldev_send_timeout(cldev, (u8 *)req, req_size, |
| 179 | + INTEL_LB_SEND_TIMEOUT_MSEC); |
| 180 | + if (bytes < 0) { |
| 181 | + dev_err(dev, "Failed to send late binding request to firmware. %zd\n", bytes); |
| 182 | + ret = bytes; |
| 183 | + goto end; |
| 184 | + } |
| 185 | + |
| 186 | + bytes = mei_cldev_recv_timeout(cldev, (u8 *)&rsp, sizeof(rsp), |
| 187 | + INTEL_LB_RECV_TIMEOUT_MSEC); |
| 188 | + if (bytes < 0) { |
| 189 | + dev_err(dev, "Failed to receive late binding reply from MEI firmware. %zd\n", |
| 190 | + bytes); |
| 191 | + ret = bytes; |
| 192 | + goto end; |
| 193 | + } |
| 194 | + if (!mei_lb_check_response(dev, bytes, &rsp)) { |
| 195 | + dev_err(dev, "Bad response from the firmware. header: %02x %02x %02x %02x\n", |
| 196 | + rsp.header.group_id, rsp.header.command, |
| 197 | + rsp.header.reserved, rsp.header.result); |
| 198 | + ret = -EPROTO; |
| 199 | + goto end; |
| 200 | + } |
| 201 | + |
| 202 | + dev_dbg(dev, "status = %u\n", le32_to_cpu(rsp.status)); |
| 203 | + ret = (int)le32_to_cpu(rsp.status); |
| 204 | +end: |
| 205 | + mei_cldev_disable(cldev); |
| 206 | + kfree(req); |
| 207 | + return ret; |
| 208 | +} |
| 209 | + |
/* Ops handed to the graphics driver components via the component framework */
static const struct intel_lb_component_ops mei_lb_ops = {
	.push_payload = mei_lb_push_payload,
};
| 213 | + |
/* Master bind: pass the late binding ops to all matched components */
static int mei_lb_component_master_bind(struct device *dev)
{
	return component_bind_all(dev, (void *)&mei_lb_ops);
}
| 218 | + |
/* Master unbind: release all components bound with mei_lb_ops */
static void mei_lb_component_master_unbind(struct device *dev)
{
	component_unbind_all(dev, (void *)&mei_lb_ops);
}
| 223 | + |
/* Aggregate (master) device callbacks for the component framework */
static const struct component_master_ops mei_lb_component_master_ops = {
	.bind = mei_lb_component_master_bind,
	.unbind = mei_lb_component_master_unbind,
};
| 228 | + |
| 229 | +static int mei_lb_component_match(struct device *dev, int subcomponent, |
| 230 | + void *data) |
| 231 | +{ |
| 232 | + /* |
| 233 | + * This function checks if requester is Intel %PCI_CLASS_DISPLAY_VGA or |
| 234 | + * %PCI_CLASS_DISPLAY_OTHER device, and checks if the requester is the |
| 235 | + * grand parent of mei_if i.e. late bind MEI device |
| 236 | + */ |
| 237 | + struct device *base = data; |
| 238 | + struct pci_dev *pdev; |
| 239 | + |
| 240 | + if (!dev) |
| 241 | + return 0; |
| 242 | + |
| 243 | + if (!dev_is_pci(dev)) |
| 244 | + return 0; |
| 245 | + |
| 246 | + pdev = to_pci_dev(dev); |
| 247 | + |
| 248 | + if (pdev->vendor != PCI_VENDOR_ID_INTEL) |
| 249 | + return 0; |
| 250 | + |
| 251 | + if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) && |
| 252 | + pdev->class != (PCI_CLASS_DISPLAY_OTHER << 8)) |
| 253 | + return 0; |
| 254 | + |
| 255 | + if (subcomponent != INTEL_COMPONENT_LB) |
| 256 | + return 0; |
| 257 | + |
| 258 | + base = base->parent; |
| 259 | + if (!base) /* mei device */ |
| 260 | + return 0; |
| 261 | + |
| 262 | + base = base->parent; /* pci device */ |
| 263 | + |
| 264 | + return !!base && dev == base; |
| 265 | +} |
| 266 | + |
| 267 | +static int mei_lb_probe(struct mei_cl_device *cldev, |
| 268 | + const struct mei_cl_device_id *id) |
| 269 | +{ |
| 270 | + struct component_match *master_match = NULL; |
| 271 | + int ret; |
| 272 | + |
| 273 | + component_match_add_typed(&cldev->dev, &master_match, |
| 274 | + mei_lb_component_match, &cldev->dev); |
| 275 | + if (IS_ERR_OR_NULL(master_match)) |
| 276 | + return -ENOMEM; |
| 277 | + |
| 278 | + ret = component_master_add_with_match(&cldev->dev, |
| 279 | + &mei_lb_component_master_ops, |
| 280 | + master_match); |
| 281 | + if (ret < 0) |
| 282 | + dev_err(&cldev->dev, "Failed to add late binding master component. %d\n", ret); |
| 283 | + |
| 284 | + return ret; |
| 285 | +} |
| 286 | + |
/* Tear down the aggregate device registered in mei_lb_probe() */
static void mei_lb_remove(struct mei_cl_device *cldev)
{
	component_master_del(&cldev->dev, &mei_lb_component_master_ops);
}
| 291 | + |
/* UUID of the MKHI client on the MEI bus served by this driver */
#define MEI_GUID_MKHI UUID_LE(0xe2c2afa2, 0x3817, 0x4d19, \
			      0x9d, 0x95, 0x6, 0xb1, 0x6b, 0x58, 0x8a, 0x5d)

static const struct mei_cl_device_id mei_lb_tbl[] = {
	{ .uuid = MEI_GUID_MKHI, .version = MEI_CL_VERSION_ANY },
	{ }
};
MODULE_DEVICE_TABLE(mei, mei_lb_tbl);
| 300 | + |
/* MEI client bus driver for the late binding firmware interface */
static struct mei_cl_driver mei_lb_driver = {
	.id_table = mei_lb_tbl,
	.name = "mei_lb",
	.probe = mei_lb_probe,
	.remove = mei_lb_remove,
};

module_mei_cl_driver(mei_lb_driver);
| 309 | + |
| 310 | +MODULE_AUTHOR("Intel Corporation"); |
| 311 | +MODULE_LICENSE("GPL"); |
| 312 | +MODULE_DESCRIPTION("MEI Late Binding Firmware Update/Upload"); |
0 commit comments