// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2025 ARM Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "common.h"
#include "notify.h"
#include "quirks.h"

#include "raw_mode.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

#define SCMI_VENDOR_MODULE_ALIAS_FMT	"scmi-protocol-0x%02x-%s"

static DEFINE_IDA(scmi_id);

static DEFINE_XARRAY(scmi_protocols);

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

static struct dentry *scmi_top_dentry;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *	a number of xfers equal to the maximum allowed in-flight
 *	messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *	currently in-flight messages.
 */
struct scmi_xfers_info {
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
	int max_msg;
	struct hlist_head free_xfers;
	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @version: Protocol version supported by the platform as detected at runtime.
 * @negotiated_version: When the platform supports a newer protocol version,
 *	the agent will try to negotiate with the platform the
 *	usage of the newest version known to it, since
 *	backward compatibility is NOT automatically assured.
 *	This field is NON-zero when a successful negotiation
 *	has completed.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform
 * instance in which it is defined by the DT and implemented by the SCMI
 * server FW.
 */
struct scmi_protocol_instance {
	const struct scmi_handle *handle;
	const struct scmi_protocol *proto;
	void *gid;
	refcount_t users;
	void *priv;
	unsigned int version;
	unsigned int negotiated_version;
	struct scmi_protocol_handle ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @id: A sequence number starting from zero identifying this instance
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	this SCMI instance: populated on protocol's first attempted
 *	usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	scmi_revision_info.num_protocols elements allocated by the
 *	base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *	in the DT and confirmed as implemented by fw.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
 * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
 *	bus
 * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
 * @dbg: A pointer to debugfs related data (if any)
 * @raw: An opaque reference handle used by SCMI Raw mode.
 */
struct scmi_info {
	int id;
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutually exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	void *notify_priv;
	struct list_head node;
	int users;
	struct notifier_block bus_nb;
	struct notifier_block dev_req_nb;
	/* Serialize device creation process for this instance */
	struct mutex devreq_mtx;
	struct scmi_debug_info *dbg;
	void *raw;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)
#define tx_minfo_to_scmi_info(h) container_of(h, struct scmi_info, tx_minfo)
#define bus_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb)	container_of(nb, struct scmi_info, dev_req_nb)

static void scmi_rx_callback(struct scmi_chan_info *cinfo,
			     u32 msg_hdr, void *priv);
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo,
				   u32 msg_hdr, enum scmi_bad_msg err);

static struct scmi_transport_core_operations scmi_trans_core_ops = {
	.bad_message_trace = scmi_bad_message_trace,
	.rx_callback = scmi_rx_callback,
};

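/*
 * Illustrative only: given the kasprintf() format used below, a vendor
 * protocol with protocol_id 0x80, vendor_id "Vendor", sub_vendor_id "SubV"
 * and impl_ver 0x00010203 would be hashed from the signature string
 * "80|Vendor|SubV|0x00010203".
 */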
static unsigned long
scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
			       char *sub_vendor_id, u32 impl_ver)
{
	char *signature, *p;
	unsigned long hash = 0;

	/* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
	signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id,
			      vendor_id ?: "", sub_vendor_id ?: "", impl_ver);
	if (!signature)
		return 0;

	p = signature;
	while (*p)
		hash = partial_name_hash(tolower(*p++), hash);
	hash = end_name_hash(hash);

	kfree(signature);

	return hash;
}

static unsigned long
scmi_protocol_key_calculate(int protocol_id, char *vendor_id,
			    char *sub_vendor_id, u32 impl_ver)
{
	if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
		return protocol_id;
	else
		return scmi_vendor_protocol_signature(protocol_id, vendor_id,
						      sub_vendor_id, impl_ver);
}

static const struct scmi_protocol *
__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
			      char *sub_vendor_id, u32 impl_ver)
{
	unsigned long key;
	struct scmi_protocol *proto = NULL;

	key = scmi_protocol_key_calculate(protocol_id, vendor_id,
					  sub_vendor_id, impl_ver);
	if (key)
		proto = xa_load(&scmi_protocols, key);

	return proto;
}

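/*
 * Matching proceeds from the most to the least specific key, summarizing
 * the fallback order implemented below:
 *   1. protocol_id + vendor_id + sub_vendor_id + impl_ver
 *   2. protocol_id + vendor_id + sub_vendor_id
 *   3. protocol_id + vendor_id
 */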
static const struct scmi_protocol *
scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
			    char *sub_vendor_id, u32 impl_ver)
{
	const struct scmi_protocol *proto = NULL;

	/* Searching for closest match ... */
	proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
					      sub_vendor_id, impl_ver);
	if (proto)
		return proto;

	/* Any match just on vendor/sub_vendor ? */
	if (impl_ver) {
		proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
						      sub_vendor_id, 0);
		if (proto)
			return proto;
	}

	/* Any match just on the vendor ? */
	if (sub_vendor_id)
		proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
						      NULL, 0);
	return proto;
}

static const struct scmi_protocol *
scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
	const struct scmi_protocol *proto;

	proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
					    version->sub_vendor_id,
					    version->impl_ver);
	if (!proto) {
		int ret;

		pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
			 protocol_id, version->vendor_id);

		/* Note that vendor_id is mandatory for vendor protocols */
		ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
				     protocol_id, version->vendor_id);
		if (ret) {
			pr_warn("Problem loading module for protocol 0x%x\n",
				protocol_id);
			return NULL;
		}

		/* Lookup again, once modules loaded */
		proto = scmi_vendor_protocol_lookup(protocol_id,
						    version->vendor_id,
						    version->sub_vendor_id,
						    version->impl_ver);
	}

	if (proto)
		pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
			protocol_id, proto->vendor_id ?: "",
			proto->sub_vendor_id ?: "", proto->impl_ver);

	return proto;
}

static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
	const struct scmi_protocol *proto = NULL;

	if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
		proto = xa_load(&scmi_protocols, protocol_id);
	else
		proto = scmi_vendor_protocol_get(protocol_id, version);

	if (!proto || !try_module_get(proto->owner)) {
		pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
		return NULL;
	}

	pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);

	return proto;
}

static void scmi_protocol_put(const struct scmi_protocol *proto)
{
	if (proto)
		module_put(proto->owner);
}

static int scmi_vendor_protocol_check(const struct scmi_protocol *proto)
{
	if (!proto->vendor_id) {
		pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
		pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (proto->sub_vendor_id &&
	    strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
		pr_err("malformed sub_vendor_id for protocol 0x%x\n",
		       proto->id);
		return -EINVAL;
	}

	return 0;
}

int scmi_protocol_register(const struct scmi_protocol *proto)
{
	int ret;
	unsigned long key;

	if (!proto) {
		pr_err("invalid protocol\n");
		return -EINVAL;
	}

	if (!proto->instance_init) {
		pr_err("missing init for protocol 0x%x\n", proto->id);
		return -EINVAL;
	}

	if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
	    scmi_vendor_protocol_check(proto))
		return -EINVAL;

	/*
	 * Calculate a protocol key to register this protocol with the core;
	 * key value 0 is considered invalid.
	 */
	key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
					  proto->sub_vendor_id,
					  proto->impl_ver);
	if (!key)
		return -EINVAL;

	ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL);
	if (ret) {
		pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
		       proto->id, ret);
		return ret;
	}

	pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
		 proto->id, proto->vendor_id, proto->sub_vendor_id,
		 proto->impl_ver);

	return 0;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);

void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
	unsigned long key;

	key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
					  proto->sub_vendor_id,
					  proto->impl_ver);
	if (!key)
		return;

	xa_erase(&scmi_protocols, key);

	pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
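
/*
 * A minimal registration sketch (hypothetical vendor module; the names
 * below are illustrative, not defined in this file). Only the fields
 * actually checked or used by this core are shown:
 *
 *	static const struct scmi_protocol my_proto = {
 *		.id = 0x80,
 *		.owner = THIS_MODULE,
 *		.instance_init = my_proto_init,
 *		.vendor_id = "MyVendor",
 *	};
 *
 * The module would call scmi_protocol_register(&my_proto) at init and
 * scmi_protocol_unregister(&my_proto) at exit, and would typically also
 * declare a module alias matching SCMI_VENDOR_MODULE_ALIAS_FMT (e.g.
 * "scmi-protocol-0x80-MyVendor") so that the request_module() call in
 * scmi_vendor_protocol_get() can autoload it.
 */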

/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 * @name: The optional name of the device to be created: if not provided this
 *	call will lead to the creation of all the devices currently requested
 *	for the specified protocol.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info,
					 int prot_id, const char *name)
{
	mutex_lock(&info->devreq_mtx);
	scmi_device_create(np, info->dev, prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}

static void scmi_destroy_protocol_devices(struct scmi_info *info,
					  int prot_id, const char *name)
{
	mutex_lock(&info->devreq_mtx);
	scmi_device_destroy(info->dev, prot_id, name);
	mutex_unlock(&info->devreq_mtx);
}

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data is visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our requests in order, we
 * should account for a few rare but possible scenarios:
 *
 *  - the exact 'next_token' may NOT be available, so pick an
 *    xfer_id >= next_token using find_next_zero_bit() starting from the
 *    candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are in use in-flight,
 *    but there are plenty of free tokens at the start, so try a second pass
 *    using find_next_zero_bit() and starting from 0.
 *
 *	X = used in-flight
 *
 * Normal
 * ------
 *
 *		|- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *		^
 *		|- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *								^
 *								|- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Pick the next available xfer_id >= next_token */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at start of xfer_alloc_table so
		 * try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		/*
		 * Something is wrong if we got here since there can be a
		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
		 */
		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
			return -ENOMEM;
	}

	/* Update +/- last_token accordingly if we skipped some hole */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}
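
/*
 * Worked example (illustrative; assumes a 10-bit token space, i.e.
 * MSG_TOKEN_MAX == 1024): with xfer->transfer_id == 1030 the candidate
 * next_token is 1030 & 1023 == 6; if bit 6 is already in use, the first
 * zero bit at or above 6 is picked instead, and transfer_last_id is
 * advanced by the number of slots that were skipped.
 */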

/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
					 struct scmi_xfer *xfer)
{
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper assumes that the xfer to be registered as in-flight
 * had been built using an xfer sequence number which still corresponds to a
 * free slot in the xfer_alloc_table.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 */
static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
				     struct scmi_xfers_info *minfo)
{
	/* In this context minfo will be tx_minfo due to the xfer pending */
	struct scmi_info *info = tx_minfo_to_scmi_info(minfo);

	/* Set in-flight */
	set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
	scmi_inc_count(info->dbg, XFERS_INFLIGHT);

	xfer->pending = true;
}

/**
 * scmi_xfer_inflight_register - Try to register an xfer as in-flight
 *
 * @xfer: The xfer to register
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Note that this helper does NOT assume anything about the sequence number
 * that was baked into the provided xfer, so it checks at first if it can
 * be mapped to a free slot and fails with an error if another xfer with the
 * same sequence number is currently still registered as in-flight.
 *
 * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
 * could not be mapped to a free slot in the xfer_alloc_table.
 */
static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
				       struct scmi_xfers_info *minfo)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	else
		ret = -EBUSY;
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}

/**
 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
 * flight on the TX channel, if possible.
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: The xfer to register
 *
 * Return: 0 on Success, error otherwise
 */
int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
				    struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
}

/**
 * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
 * as pending in-flight
 *
 * @xfer: The xfer to act upon
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Return: 0 on Success or error otherwise
 */
static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
					struct scmi_xfers_info *minfo)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	/* Set a new monotonic token as the xfer sequence number */
	ret = scmi_xfer_token_set(minfo, xfer);
	if (!ret)
		scmi_xfer_inflight_register_unlocked(xfer, minfo);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return ret;
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and performs
 * a basic initialization.
 *
 * Note that, at this point, still no sequence number is assigned to the
 * allocated xfer, nor is it registered as a pending transaction.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else an error pointer.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/*
	 * Allocate transfer_id early so that can be used also as base for
	 * monotonic sequence number generation if needed.
	 */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	refcount_set(&xfer->users, 1);
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}

/**
 * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
 *
 * @handle: Pointer to SCMI entity handle
 *
 * Note that xfer is taken from the TX channel structures.
 *
 * Return: A valid xfer on Success, or an error-pointer otherwise
 */
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer = scmi_xfer_get(handle, &info->tx_minfo);
	if (!IS_ERR(xfer))
		xfer->flags |= SCMI_XFER_FLAG_IS_RAW;

	return xfer;
}

/**
 * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
 * to use for a specific protocol_id Raw transaction.
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol_id: Identifier of the protocol
 *
 * Note that in a regular SCMI stack, usually, a protocol has to be defined in
 * the DT to have an associated channel and be usable; but in Raw mode any
 * protocol in range is allowed, re-using the Base channel, so as to enable
 * fuzzing on any protocol without the need of a fully compiled DT.
 *
 * Return: A reference to the channel to use, or an ERR_PTR
 */
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_chan_info *cinfo;
	struct scmi_info *info = handle_to_scmi_info(handle);

	cinfo = idr_find(&info->tx_idr, protocol_id);
	if (!cinfo) {
		if (protocol_id == SCMI_PROTOCOL_BASE)
			return ERR_PTR(-EINVAL);
		/* Use Base channel for protocols not defined for DT */
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		if (!cinfo)
			return ERR_PTR(-EINVAL);
		dev_warn_once(handle->dev,
			      "Using Base channel for protocol 0x%X\n",
			      protocol_id);
	}

	return cinfo;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			struct scmi_info *info = tx_minfo_to_scmi_info(minfo);

			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;

			scmi_dec_count(info->dbg, XFERS_INFLIGHT);
		}
		xfer->flags = 0;
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: A reference to the xfer to put
 *
 * Note that as with other xfer_put() handlers the xfer is really effectively
 * released only if there are no more users on the system.
 */
void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	return __scmi_xfer_put(&info->tx_minfo, xfer);
}

/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL;

	if (test_bit(xfer_id, minfo->xfer_alloc_table))
		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

	return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_bad_message_trace - A helper to trace weird messages
 *
 * @cinfo: A reference to the channel descriptor on which the message was
 *	received
 * @msg_hdr: Message header to track
 * @err: A specific error code used as a status value in traces.
 *
 * This helper can be used to trace any kind of weird, incomplete, unexpected,
 * timed-out message that arrives and as such, can be traced only referring to
 * the header content, since the payload is missing/unreliable.
 */
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
				   enum scmi_bad_msg err)
{
	char *tag;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	switch (MSG_XTRACT_TYPE(msg_hdr)) {
	case MSG_TYPE_COMMAND:
		tag = "!RESP";
		break;
	case MSG_TYPE_DELAYED_RESP:
		tag = "!DLYD";
		break;
	case MSG_TYPE_NOTIFICATION:
		tag = "!NOTI";
		break;
	default:
		tag = "!UNKN";
		break;
	}

	trace_scmi_msg_dump(info->id, cinfo->id,
			    MSG_XTRACT_PROT_ID(msg_hdr),
			    MSG_XTRACT_ID(msg_hdr), tag,
			    MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0);
}

/**
 * scmi_msg_response_validate - Validate message type against state of related
 * xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response) the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
					     u8 msg_type,
					     struct scmi_xfer *xfer)
{
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/*
			 * Delayed Response expected but delivered earlier.
			 * Assume message RESPONSE was OK and skip state.
			 */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
			dev_warn(cinfo->dev,
				 "Received valid OoO Delayed Response for %d\n",
				 xfer->hdr.seq);
		}
		break;
	case SCMI_XFER_RESP_OK:
		if (msg_type != MSG_TYPE_DELAYED_RESP)
			return -EINVAL;
		break;
	case SCMI_XFER_DRESP_OK:
		/* No further message expected once in SCMI_XFER_DRESP_OK */
		return -EINVAL;
	}

	return 0;
}

/**
 * scmi_xfer_state_update - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully validated
 * by @scmi_msg_response_validate(), so here we just update the state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
	xfer->hdr.type = msg_type;

	/* Unknown command types were already discarded earlier */
	if (xfer->hdr.type == MSG_TYPE_COMMAND)
		xfer->state = SCMI_XFER_RESP_OK;
	else
		xfer->state = SCMI_XFER_DRESP_OK;
}

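/*
 * Try to take exclusive ownership of an xfer by atomically flipping its
 * busy flag from SCMI_XFER_FREE to SCMI_XFER_BUSY: returns true only if
 * this context won the cmpxchg and so now owns the xfer.
 */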
static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	/* Are we even expecting this? */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
	if (IS_ERR(xfer)) {
		dev_err(cinfo->dev,
			"Message for %d type %d is not expected!\n",
			xfer_id, msg_type);
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
		scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);

		return xfer;
	}
	refcount_inc(&xfer->users);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	spin_lock_irqsave(&xfer->lock, flags);
	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruent state with
	 * the received message, acquire exclusive access to it setting the busy
	 * flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
		spin_until_cond(scmi_xfer_acquired(xfer));
		scmi_xfer_state_update(xfer, msg_type);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (ret) {
		dev_err(cinfo->dev,
			"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
			msg_type, xfer_id, msg_hdr, xfer->state);

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
		scmi_inc_count(info->dbg, ERR_MSG_INVALID);

		/* On error the refcount incremented above has to be dropped */
		__scmi_xfer_put(minfo, xfer);
		xfer = ERR_PTR(-EINVAL);
	}

	return xfer;
}

static inline void scmi_xfer_command_release(struct scmi_info *info,
					     struct scmi_xfer *xfer)
{
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static inline void scmi_clear_channel(struct scmi_info *info,
				      struct scmi_chan_info *cinfo)
{
	if (!cinfo->is_p2a) {
		dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
		return;
	}

	if (info->desc->ops->clear_channel)
		info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));

		scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
		scmi_inc_count(info->dbg, ERR_MSG_NOMEM);

		scmi_clear_channel(info, cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "NOTI", xfer->hdr.seq,
			    xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
	scmi_inc_count(info->dbg, NOTIFICATION_OK);

	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
		scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
					cinfo->id);
	}

	__scmi_xfer_put(minfo, xfer);

	scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
			scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);

		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
			scmi_clear_channel(info, cinfo);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id,
			    xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
			    (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
			    (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
			    xfer->hdr.seq, xfer->hdr.status,
			    xfer->rx.buf, xfer->rx.len);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   xfer->hdr.type);

	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
		scmi_clear_channel(info, cinfo);
		complete(xfer->async_done);
		scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
	} else {
		complete(&xfer->done);
		scmi_inc_count(info->dbg, RESPONSE_OK);
	}

	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
		/*
		 * When in polling mode avoid to queue the Raw xfer on the IRQ
		 * RX path since it will be already queued at the end of the TX
		 * poll loop.
		 */
		if (!xfer->hdr.poll_completion ||
		    xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
			scmi_raw_message_report(info->raw, xfer,
						SCMI_RAW_REPLY_QUEUE,
						cinfo->id);
	}

	scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr,
			     void *priv)
{
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr, priv);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, msg_hdr, priv);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN);
		break;
	}
}

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop,
				      bool *ooo)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	/*
	 * Poll also on xfer->done so that polling can be forcibly terminated
	 * in case of out-of-order receptions of delayed responses
	 */
	return info->desc->ops->poll_done(cinfo, xfer) ||
	       (*ooo = try_wait_for_completion(&xfer->done)) ||
	       ktime_after(ktime_get(), stop);
}

static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
			       struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer, unsigned int timeout_ms)
{
	int ret = 0;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	if (xfer->hdr.poll_completion) {
		/*
		 * Real polling is needed only if transport has NOT declared
		 * itself to support synchronous commands replies.
		 */
		if (!desc->sync_cmds_completed_on_ret) {
			bool ooo = false;

			/*
			 * Poll on xfer using transport provided .poll_done();
			 * assumes no completion interrupt was available.
			 */
			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

			spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
								  stop, &ooo));
			if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
				dev_err(dev,
					"timed out in resp(caller: %pS) - polling\n",
					(void *)_RET_IP_);
				ret = -ETIMEDOUT;
				scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
			}
		}

		if (!ret) {
			unsigned long flags;

			/*
			 * Do not fetch_response if an out-of-order delayed
			 * response is being processed.
			 */
			spin_lock_irqsave(&xfer->lock, flags);
			if (xfer->state == SCMI_XFER_SENT_OK) {
				desc->ops->fetch_response(cinfo, xfer);
				xfer->state = SCMI_XFER_RESP_OK;
			}
			spin_unlock_irqrestore(&xfer->lock, flags);

			/* Trace polled replies. */
			trace_scmi_msg_dump(info->id, cinfo->id,
					    xfer->hdr.protocol_id, xfer->hdr.id,
					    !SCMI_XFER_IS_RAW(xfer) ?
					    "RESP" : "resp",
					    xfer->hdr.seq, xfer->hdr.status,
					    xfer->rx.buf, xfer->rx.len);
			scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);

			if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
				scmi_raw_message_report(info->raw, xfer,
							SCMI_RAW_REPLY_QUEUE,
							cinfo->id);
			}
		}
	} else {
		/* And we wait for the response. */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms))) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
			scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
		}
	}

	return ret;
}

/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
					  struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq,
				      info->desc->max_rx_timeout_ms,
				      xfer->hdr.poll_completion);

	return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
				   info->desc->max_rx_timeout_ms);
}

/**
 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
 * reply to an xfer raw request on a specific channel for the required timeout.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 * @timeout_ms: The maximum timeout in milliseconds
 *
 * Return: 0 on Success, error otherwise.
 */
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
					    struct scmi_xfer *xfer,
					    unsigned int timeout_ms)
{
	int ret;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;

	ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
	if (ret)
		dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
			pack_scmi_header(&xfer->hdr));

	return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response; in case of transmit error, the
 * corresponding error; else 0 if all goes well.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/* Check for polling request on custom command xfers at first */
	if (xfer->hdr.poll_completion &&
	    !is_transport_polling_capable(info->desc)) {
		dev_warn_once(dev,
			      "Polling mode is not supported by transport.\n");
		scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
		return -EINVAL;
	}

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo)) {
		scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
		return -EINVAL;
	}
	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, info->desc))
		xfer->hdr.poll_completion = true;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion,
			      scmi_inflight_count(&info->handle));

	/* Clear any stale status */
	xfer->hdr.status = SCMI_SUCCESS;
	xfer->state = SCMI_XFER_SENT_OK;
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		scmi_inc_count(info->dbg, SENT_FAIL);
		return ret;
	}

	trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "CMND", xfer->hdr.seq,
			    xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
	scmi_inc_count(info->dbg, SENT_OK);

	ret = scmi_wait_for_message_response(cinfo, xfer);
	if (!ret && xfer->hdr.status) {
		ret = scmi_to_linux_errno(xfer->hdr.status);
		scmi_inc_count(info->dbg, ERR_PROTOCOL);
	}

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret, xfer);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret,
			    scmi_inflight_count(&info->handle));

	return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 * response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction since
 * upper layers should refrain from issuing such kind of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (thing that can be easily
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (in other words there is usually a good reason if a platform provides an
 * asynchronous version of a command and we should prefer to use it...just not
 * when using atomic/polling mode)
 *
 * Return: -ETIMEDOUT in case of no delayed response; in case of transmit
 * error, the corresponding error; else 0 if all goes well.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	/*
	 * Delayed responses should not be polled, so an async command should
	 * not have been used when requiring an atomic/poll context; WARN and
	 * perform instead a sleeping wait.
	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
	 */
	WARN_ON_ONCE(xfer->hdr.poll_completion);

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
			dev_err(ph->dev,
				"timed out in delayed resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		} else if (xfer->hdr.status) {
			ret = scmi_to_linux_errno(xfer->hdr.status);
		}
	}

	xfer->async_done = NULL;
	return ret;
}

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	/* Pick a sequence number and register this xfer as in-flight */
	ret = scmi_xfer_pending_set(xfer, minfo);
	if (ret) {
		dev_err(pi->handle->dev,
			"Failed to get monotonic token %d\n", ret);
		__scmi_xfer_put(minfo, xfer);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.type = MSG_TYPE_COMMAND;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}

/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};
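
/*
 * Protocol implementations do not call the helpers above directly: they
 * reach them through the protocol handle, e.g. ph->xops->do_xfer(ph, t),
 * as done by the common helpers below.
 */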

struct scmi_msg_resp_domain_name_get {
	__le32 flags;
	u8 name[SCMI_MAX_STR_SIZE];
};

/**
 * scmi_common_extended_name_get - Common helper to get extended resources name
 * @ph: A protocol handle reference.
 * @cmd_id: The specific command ID to use.
 * @res_id: The specific resource ID to use.
 * @flags: A pointer to specific flags to use, if any.
 * @name: A pointer to the preallocated area where the retrieved name will be
 *	  stored as a NULL terminated string.
 * @len: The length in bytes of the @name char array.
 *
 * Return: 0 on Success
 */
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
					 u8 cmd_id, u32 res_id, u32 *flags,
					 char *name, size_t len)
{
	int ret;
	size_t txlen;
	struct scmi_xfer *t;
	struct scmi_msg_resp_domain_name_get *resp;

	txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
	ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
	if (ret)
		goto out;

	put_unaligned_le32(res_id, t->tx.buf);
	if (flags)
		put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
	resp = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		strscpy(name, resp->name, len);

	ph->xops->xfer_put(ph, t);
out:
	if (ret)
		dev_warn(ph->dev,
			 "Failed to get extended name - id:%u (ret:%d). Using %s\n",
			 res_id, ret, name);
	return ret;
}

/**
 * scmi_common_get_max_msg_size - Get maximum message size
 * @ph: A protocol handle reference.
 *
 * Return: Maximum message size for the current protocol.
 */
static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	return info->desc->max_msg_size;
}
1727
1728/**
1729 * scmi_protocol_msg_check - Check protocol message attributes
1730 *
1731 * @ph: A reference to the protocol handle.
1732 * @message_id: The ID of the message to check.
1733 * @attributes: A parameter to optionally return the retrieved message
1734 * attributes, in case of Success.
1735 *
1736 * A helper to check protocol message attributes for a specific protocol
1737 * and message pair.
1738 *
1739 * Return: 0 on SUCCESS
1740 */
1741static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
1742 u32 message_id, u32 *attributes)
1743{
1744 int ret;
1745 struct scmi_xfer *t;
1746
1747 ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
1748 sizeof(__le32), 0, &t);
1749 if (ret)
1750 return ret;
1751
1752 put_unaligned_le32(message_id, t->tx.buf);
1753 ret = do_xfer(ph, t);
1754 if (!ret && attributes)
1755 *attributes = get_unaligned_le32(t->rx.buf);
1756 xfer_put(ph, t);
1757
1758 return ret;
1759}
1760
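/*
 * Example usage - an illustrative sketch only: during instance
 * initialization a protocol can probe for an optional command and cache its
 * availability; FOO_OPTIONAL_CMD and finfo are hypothetical.
 *
 *	u32 attrs;
 *
 *	if (!ph->hops->protocol_msg_check(ph, FOO_OPTIONAL_CMD, &attrs))
 *		finfo->has_optional_cmd = true;
 */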
1761/**
1762 * struct scmi_iterator - Iterator descriptor
1763 * @msg: A reference to the message TX buffer; filled by @prepare_message with
1764 * a proper custom command payload for each multi-part command request.
1765 * @resp: A reference to the response RX buffer; used by @update_state and
1766 * @process_response to parse the multi-part replies.
1767 * @t: A reference to the underlying xfer initialized and used transparently by
1768 * the iterator internal routines.
1769 * @ph: A reference to the associated protocol handle to be used.
1770 * @ops: A reference to the custom provided iterator operations.
1771 * @state: The current iterator state; used and updated in turn by the iterator's
1772 * internal routines and by the caller-provided @scmi_iterator_ops.
1773 * @priv: A reference to optional private data as provided by the caller and
1774 * passed back to the @scmi_iterator_ops.
1775 */
1776struct scmi_iterator {
1777 void *msg;
1778 void *resp;
1779 struct scmi_xfer *t;
1780 const struct scmi_protocol_handle *ph;
1781 struct scmi_iterator_ops *ops;
1782 struct scmi_iterator_state state;
1783 void *priv;
1784};
1785
1786static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
1787 struct scmi_iterator_ops *ops,
1788 unsigned int max_resources, u8 msg_id,
1789 size_t tx_size, void *priv)
1790{
1791 int ret;
1792 struct scmi_iterator *i;
1793
1794 i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1795 if (!i)
1796 return ERR_PTR(-ENOMEM);
1797
1798 i->ph = ph;
1799 i->ops = ops;
1800 i->priv = priv;
1801
1802 ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1803 if (ret) {
1804 devm_kfree(ph->dev, i);
1805 return ERR_PTR(ret);
1806 }
1807
1808 i->state.max_resources = max_resources;
1809 i->msg = i->t->tx.buf;
1810 i->resp = i->t->rx.buf;
1811
1812 return i;
1813}
1814
1815static int scmi_iterator_run(void *iter)
1816{
1817 int ret = -EINVAL;
1818 struct scmi_iterator_ops *iops;
1819 const struct scmi_protocol_handle *ph;
1820 struct scmi_iterator_state *st;
1821 struct scmi_iterator *i = iter;
1822
1823 if (!i || !i->ops || !i->ph)
1824 return ret;
1825
1826 iops = i->ops;
1827 ph = i->ph;
1828 st = &i->state;
1829
1830 do {
1831 iops->prepare_message(i->msg, st->desc_index, i->priv);
1832 ret = ph->xops->do_xfer(ph, i->t);
1833 if (ret)
1834 break;
1835
1836 st->rx_len = i->t->rx.len;
1837 ret = iops->update_state(st, i->resp, i->priv);
1838 if (ret)
1839 break;
1840
1841 if (st->num_returned > st->max_resources - st->desc_index) {
1842 dev_err(ph->dev,
1843 "No. of resources can't exceed %d\n",
1844 st->max_resources);
1845 ret = -EINVAL;
1846 break;
1847 }
1848
1849 for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1850 st->loop_idx++) {
1851 ret = iops->process_response(ph, i->resp, st, i->priv);
1852 if (ret)
1853 goto out;
1854 }
1855
1856 st->desc_index += st->num_returned;
1857 ph->xops->reset_rx_to_maxsz(ph, i->t);
1858 /*
1859 * check for both returned and remaining to avoid infinite
1860 * loop due to buggy firmware
1861 */
1862 } while (st->num_returned && st->num_remaining);
1863
1864out:
1865 /* Finalize and destroy iterator */
1866 ph->xops->xfer_put(ph, i->t);
1867 devm_kfree(ph->dev, i);
1868
1869 return ret;
1870}
1871
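/*
 * Example usage - an illustrative sketch only, with hypothetical message
 * layouts and command ID, assuming the scmi_iterator_ops prototypes declared
 * in protocols.h: a protocol drives a multi-part query by providing the
 * three callbacks and running the iterator, which then takes care of the
 * underlying xfer life-cycle on its own.
 *
 *	static void iter_foo_prepare_message(void *message,
 *					     unsigned int desc_index,
 *					     const void *priv)
 *	{
 *		struct scmi_msg_foo_describe *msg = message;
 *
 *		msg->index = cpu_to_le32(desc_index);
 *	}
 *
 *	... similarly for .update_state and .process_response ...
 *
 *	iter = ph->hops->iter_response_init(ph, &ops, num_resources,
 *					    FOO_DESCRIBE, sizeof(*msg), &ctx);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *
 *	return ph->hops->iter_response_run(iter);
 */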
1872struct scmi_msg_get_fc_info {
1873 __le32 domain;
1874 __le32 message_id;
1875};
1876
1877struct scmi_msg_resp_desc_fc {
1878 __le32 attr;
1879#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
1880#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
1881 __le32 rate_limit;
1882 __le32 chan_addr_low;
1883 __le32 chan_addr_high;
1884 __le32 chan_size;
1885 __le32 db_addr_low;
1886 __le32 db_addr_high;
1887 __le32 db_set_lmask;
1888 __le32 db_set_hmask;
1889 __le32 db_preserve_lmask;
1890 __le32 db_preserve_hmask;
1891};
1892
1893#define QUIRK_PERF_FC_FORCE \
1894 ({ \
1895 if (pi->proto->id == SCMI_PROTOCOL_PERF && \
1896 message_id == 0x8 /* PERF_LEVEL_GET */) \
1897 attributes |= BIT(0); \
1898 })
1899
1900static void
1901scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1902 u8 describe_id, u32 message_id, u32 valid_size,
1903 u32 domain, void __iomem **p_addr,
1904 struct scmi_fc_db_info **p_db, u32 *rate_limit)
1905{
1906 int ret;
1907 u32 flags;
1908 u64 phys_addr;
1909 u32 attributes;
1910 u8 size;
1911 void __iomem *addr;
1912 struct scmi_xfer *t;
1913 struct scmi_fc_db_info *db = NULL;
1914 struct scmi_msg_get_fc_info *info;
1915 struct scmi_msg_resp_desc_fc *resp;
1916 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1917
1918 /* Check if the MSG_ID supports fastchannel */
1919 ret = scmi_protocol_msg_check(ph, message_id, &attributes);
1920 SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
1921 if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
1922 dev_dbg(ph->dev,
1923 "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
1924 pi->proto->id, message_id, domain, ret);
1925 return;
1926 }
1927
1928 if (!p_addr) {
1929 ret = -EINVAL;
1930 goto err_out;
1931 }
1932
1933 ret = ph->xops->xfer_get_init(ph, describe_id,
1934 sizeof(*info), sizeof(*resp), &t);
1935 if (ret)
1936 goto err_out;
1937
1938 info = t->tx.buf;
1939 info->domain = cpu_to_le32(domain);
1940 info->message_id = cpu_to_le32(message_id);
1941
1942 /*
1943 * Bail out on error leaving fc_info addresses zeroed; this includes
1944 * the case in which the requested domain/message_id does NOT support
1945 * fastchannels at all.
1946 */
1947 ret = ph->xops->do_xfer(ph, t);
1948 if (ret)
1949 goto err_xfer;
1950
1951 resp = t->rx.buf;
1952 flags = le32_to_cpu(resp->attr);
1953 size = le32_to_cpu(resp->chan_size);
1954 if (size != valid_size) {
1955 ret = -EINVAL;
1956 goto err_xfer;
1957 }
1958
1959 if (rate_limit)
1960 *rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);
1961
1962 phys_addr = le32_to_cpu(resp->chan_addr_low);
1963 phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1964 addr = devm_ioremap(ph->dev, phys_addr, size);
1965 if (!addr) {
1966 ret = -EADDRNOTAVAIL;
1967 goto err_xfer;
1968 }
1969
1970 *p_addr = addr;
1971
1972 if (p_db && SUPPORTS_DOORBELL(flags)) {
1973 db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1974 if (!db) {
1975 ret = -ENOMEM;
1976 goto err_db;
1977 }
1978
1979 size = 1 << DOORBELL_REG_WIDTH(flags);
1980 phys_addr = le32_to_cpu(resp->db_addr_low);
1981 phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1982 addr = devm_ioremap(ph->dev, phys_addr, size);
1983 if (!addr) {
1984 ret = -EADDRNOTAVAIL;
1985 goto err_db_mem;
1986 }
1987
1988 db->addr = addr;
1989 db->width = size;
1990 db->set = le32_to_cpu(resp->db_set_lmask);
1991 db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1992 db->mask = le32_to_cpu(resp->db_preserve_lmask);
1993 db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1994
1995 *p_db = db;
1996 }
1997
1998 ph->xops->xfer_put(ph, t);
1999
2000 dev_dbg(ph->dev,
2001 "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
2002 pi->proto->id, message_id, domain);
2003
2004 return;
2005
2006err_db_mem:
2007 devm_kfree(ph->dev, db);
2008
2009err_db:
2010 *p_addr = NULL;
2011
2012err_xfer:
2013 ph->xops->xfer_put(ph, t);
2014
2015err_out:
2016 dev_warn(ph->dev,
2017 "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
2018 pi->proto->id, message_id, domain, ret);
2019}
2020
2021#define SCMI_PROTO_FC_RING_DB(w) \
2022do { \
2023 u##w val = 0; \
2024 \
2025 if (db->mask) \
2026 val = ioread##w(db->addr) & db->mask; \
2027 iowrite##w((u##w)db->set | val, db->addr); \
2028} while (0)
2029
2030static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
2031{
2032 if (!db || !db->addr)
2033 return;
2034
2035 if (db->width == 1)
2036 SCMI_PROTO_FC_RING_DB(8);
2037 else if (db->width == 2)
2038 SCMI_PROTO_FC_RING_DB(16);
2039 else if (db->width == 4)
2040 SCMI_PROTO_FC_RING_DB(32);
2041 else /* db->width == 8 */
2042 SCMI_PROTO_FC_RING_DB(64);
2043}
2044
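/*
 * Example usage - an illustrative sketch only, with hypothetical command and
 * message IDs: a protocol asks for a fastchannel at init time and, when one
 * was granted, writes the shared memory directly, ringing the doorbell
 * afterwards, instead of sending a regular message.
 *
 *	void __iomem *set_addr = NULL;
 *	struct scmi_fc_db_info *set_db = NULL;
 *	u32 rate_limit = 0;
 *
 *	ph->hops->fastchannel_init(ph, FOO_DESCRIBE_FASTCHANNEL,
 *				   FOO_LEVEL_SET, sizeof(u32), domain,
 *				   &set_addr, &set_db, &rate_limit);
 *	...
 *	if (set_addr) {
 *		iowrite32(level, set_addr);
 *		ph->hops->fastchannel_db_ring(set_db);
 *	}
 */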
2045static const struct scmi_proto_helpers_ops helpers_ops = {
2046 .extended_name_get = scmi_common_extended_name_get,
2047 .get_max_msg_size = scmi_common_get_max_msg_size,
2048 .iter_response_init = scmi_iterator_init,
2049 .iter_response_run = scmi_iterator_run,
2050 .protocol_msg_check = scmi_protocol_msg_check,
2051 .fastchannel_init = scmi_common_fastchannel_init,
2052 .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
2053};
2054
2055/**
2056 * scmi_revision_area_get - Retrieve version memory area.
2057 *
2058 * @ph: A reference to the protocol handle.
2059 *
2060 * A helper to grab the version memory area reference during SCMI Base protocol
2061 * initialization.
2062 *
2063 * Return: A reference to the version memory area associated to the SCMI
2064 * instance underlying this protocol handle.
2065 */
2066struct scmi_revision_info *
2067scmi_revision_area_get(const struct scmi_protocol_handle *ph)
2068{
2069 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2070
2071 return pi->handle->version;
2072}
2073
2074/**
2075 * scmi_protocol_version_negotiate - Negotiate protocol version
2076 *
2077 * @ph: A reference to the protocol handle.
2078 *
2079 * A helper to negotiate a protocol version different from the latest
2080 * advertised as supported from the platform: on Success backward
2081 * compatibility is assured by the platform.
2082 *
2083 * Return: 0 on Success
2084 */
2085static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
2086{
2087 int ret;
2088 struct scmi_xfer *t;
2089 struct scmi_protocol_instance *pi = ph_to_pi(ph);
2090
2091 /* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
2092 ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
2093 if (ret)
2094 return ret;
2095
2096 /* ... then attempt protocol version negotiation */
2097 ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
2098 sizeof(__le32), 0, &t);
2099 if (ret)
2100 return ret;
2101
2102 put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
2103 ret = do_xfer(ph, t);
2104 if (!ret)
2105 pi->negotiated_version = pi->proto->supported_version;
2106
2107 xfer_put(ph, t);
2108
2109 return ret;
2110}
2111
2112/**
2113 * scmi_protocol_version_initialize - Initialize protocol version
2114 * @dev: A device reference.
2115 * @pi: A reference to the protocol instance being initialized
2116 *
2117 * First retrieve the newest protocol version supported by the platform for
2118 * this specific protocol.
2119 *
2120 * Negotiation is attempted only when the platform advertised a protocol
2121 * version newer than the most recent version known to this agent, since
2122 * backward compatibility is NOT assured in general between versions.
2123 *
2124 * If negotiation fails, or the supported version cannot be queried at all,
2125 * the agent will nonetheless attempt to use the newest version known to it,
2126 * even though compatibility is NOT assured.
2127 *
2128 * Versions are defined as:
2129 *
2130 * pi->version: the version supported by the platform as returned by the query.
2131 * pi->proto->supported_version: the newest version supported by this agent
2132 * for this protocol.
2133 * pi->negotiated_version: The version successfully negotiated with the platform.
2134 * ph->version: The final version effectively chosen for this session.
2135 */
2136static void scmi_protocol_version_initialize(struct device *dev,
2137 struct scmi_protocol_instance *pi)
2138{
2139 struct scmi_protocol_handle *ph = &pi->ph;
2140 int ret;
2141
2142 /*
2143 * Query and store platform supported protocol version: this is usually
2144 * the newest version the platform can support.
2145 */
2146 ret = version_get(ph, &pi->version);
2147 if (ret) {
2148 dev_warn(dev,
2149 "Failed to query supported version for protocol 0x%X.\n",
2150 pi->proto->id);
2151 goto best_effort;
2152 }
2153
2154 /* Need to negotiate at all? */
2155 if (pi->version <= pi->proto->supported_version) {
2156 ph->version = pi->version;
2157 return;
2158 }
2159
2160 /* Attempt negotiation */
2161 ret = scmi_protocol_version_negotiate(ph);
2162 if (!ret) {
2163 ph->version = pi->negotiated_version;
2164 dev_info(dev,
2165 "Protocol 0x%X successfully negotiated version 0x%X\n",
2166 pi->proto->id, ph->version);
2167 return;
2168 }
2169
2170 dev_warn(dev,
2171 "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
2172 pi->version, pi->proto->id);
2173
2174best_effort:
2175 /* Fallback to use newest version known to this agent */
2176 ph->version = pi->proto->supported_version;
2177 dev_warn(dev,
2178 "Trying version 0x%X. Backward compatibility is NOT assured.\n",
2179 ph->version);
2180}
2181
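/*
 * A short worked example of the above selection logic, with illustrative
 * values: if the platform reports pi->version = 0x30000 while this agent
 * only knows pi->proto->supported_version = 0x20000, negotiation of 0x20000
 * is attempted; on success ph->version becomes 0x20000, on failure the agent
 * still falls back to 0x20000 on a best-effort basis. Conversely, a platform
 * reporting 0x10000 is used as-is, since no newer common version exists to
 * negotiate.
 */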
2182/**
2183 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
2184 * instance descriptor.
2185 * @info: The reference to the related SCMI instance.
2186 * @proto: The protocol descriptor.
2187 *
2188 * Allocate a new protocol instance descriptor, using the provided @proto
2189 * description, against the specified SCMI instance @info, and initialize it;
2190 * all resources management is handled via a dedicated per-protocol devres
2191 * group.
2192 *
2193 * Context: Assumes to be called with @protocols_mtx already acquired.
2194 * Return: A reference to a freshly allocated and initialized protocol instance
2195 * or ERR_PTR on failure. On failure the @proto reference is at first
2196 * put using @scmi_protocol_put() before releasing the whole devres group.
2197 */
2198static struct scmi_protocol_instance *
2199scmi_alloc_init_protocol_instance(struct scmi_info *info,
2200 const struct scmi_protocol *proto)
2201{
2202 int ret = -ENOMEM;
2203 void *gid;
2204 struct scmi_protocol_instance *pi;
2205 const struct scmi_handle *handle = &info->handle;
2206
2207 /* Protocol specific devres group */
2208 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
2209 if (!gid) {
2210 scmi_protocol_put(proto);
2211 goto out;
2212 }
2213
2214 pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
2215 if (!pi)
2216 goto clean;
2217
2218 pi->gid = gid;
2219 pi->proto = proto;
2220 pi->handle = handle;
2221 pi->ph.dev = handle->dev;
2222 pi->ph.xops = &xfer_ops;
2223 pi->ph.hops = &helpers_ops;
2224 pi->ph.set_priv = scmi_set_protocol_priv;
2225 pi->ph.get_priv = scmi_get_protocol_priv;
2226 refcount_set(&pi->users, 1);
2227
2228 /*
2229 * Initialize effectively used protocol version performing any
2230 * possibly needed negotiations.
2231 */
2232 scmi_protocol_version_initialize(handle->dev, pi);
2233
2234 /* proto->init is assured NON NULL by scmi_protocol_register */
2235 ret = pi->proto->instance_init(&pi->ph);
2236 if (ret)
2237 goto clean;
2238
2239 ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
2240 GFP_KERNEL);
2241 if (ret != proto->id)
2242 goto clean;
2243
2244 /*
2245 * Warn but ignore events registration errors since we do not want
2246 * to skip whole protocols if their notifications are messed up.
2247 */
2248 if (pi->proto->events) {
2249 ret = scmi_register_protocol_events(handle, pi->proto->id,
2250 &pi->ph,
2251 pi->proto->events);
2252 if (ret)
2253 dev_warn(handle->dev,
2254 "Protocol:%X - Events Registration Failed - err:%d\n",
2255 pi->proto->id, ret);
2256 }
2257
2258 devres_close_group(handle->dev, pi->gid);
2259 dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
2260
2261 return pi;
2262
2263clean:
2264 /* Take care to put the protocol module's owner before releasing all */
2265 scmi_protocol_put(proto);
2266 devres_release_group(handle->dev, gid);
2267out:
2268 return ERR_PTR(ret);
2269}
2270
2271/**
2272 * scmi_get_protocol_instance - Protocol initialization helper.
2273 * @handle: A reference to the SCMI platform instance.
2274 * @protocol_id: The protocol being requested.
2275 *
2276 * In case the required protocol has never been requested before for this
2277 * instance, allocate and initialize all the needed structures while handling
2278 * resource allocation with a dedicated per-protocol devres subgroup.
2279 *
2280 * Return: A reference to an initialized protocol instance or error on failure:
2281 * in particular returns -EPROBE_DEFER when the desired protocol could
2282 * NOT be found.
2283 */
2284static struct scmi_protocol_instance * __must_check
2285scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
2286{
2287 struct scmi_protocol_instance *pi;
2288 struct scmi_info *info = handle_to_scmi_info(handle);
2289
2290 mutex_lock(&info->protocols_mtx);
2291 pi = idr_find(&info->protocols, protocol_id);
2292
2293 if (pi) {
2294 refcount_inc(&pi->users);
2295 } else {
2296 const struct scmi_protocol *proto;
2297
2298 /* Fails if protocol not registered on bus */
2299 proto = scmi_protocol_get(protocol_id, &info->version);
2300 if (proto)
2301 pi = scmi_alloc_init_protocol_instance(info, proto);
2302 else
2303 pi = ERR_PTR(-EPROBE_DEFER);
2304 }
2305 mutex_unlock(&info->protocols_mtx);
2306
2307 return pi;
2308}
2309
2310/**
2311 * scmi_protocol_acquire - Protocol acquire
2312 * @handle: A reference to the SCMI platform instance.
2313 * @protocol_id: The protocol being requested.
2314 *
2315 * Register a new user for the requested protocol on the specified SCMI
2316 * platform instance, possibly triggering its initialization on first user.
2317 *
2318 * Return: 0 if protocol was acquired successfully.
2319 */
2320int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
2321{
2322 return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
2323}
2324
2325/**
2326 * scmi_protocol_release - Protocol de-initialization helper.
2327 * @handle: A reference to the SCMI platform instance.
2328 * @protocol_id: The protocol being requested.
2329 *
2330 * Remove one user for the specified protocol and trigger de-initialization
2331 * and resource de-allocation once the last user has gone.
2332 */
2333void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
2334{
2335 struct scmi_info *info = handle_to_scmi_info(handle);
2336 struct scmi_protocol_instance *pi;
2337
2338 mutex_lock(&info->protocols_mtx);
2339 pi = idr_find(&info->protocols, protocol_id);
2340 if (WARN_ON(!pi))
2341 goto out;
2342
2343 if (refcount_dec_and_test(&pi->users)) {
2344 void *gid = pi->gid;
2345
2346 if (pi->proto->events)
2347 scmi_deregister_protocol_events(handle, protocol_id);
2348
2349 if (pi->proto->instance_deinit)
2350 pi->proto->instance_deinit(&pi->ph);
2351
2352 idr_remove(&info->protocols, protocol_id);
2353
2354 scmi_protocol_put(pi->proto);
2355
2356 devres_release_group(handle->dev, gid);
2357 dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
2358 protocol_id);
2359 }
2360
2361out:
2362 mutex_unlock(&info->protocols_mtx);
2363}
2364
2365void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
2366 u8 *prot_imp)
2367{
2368 const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2369 struct scmi_info *info = handle_to_scmi_info(pi->handle);
2370
2371 info->protocols_imp = prot_imp;
2372}
2373
2374static bool
2375scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
2376{
2377 int i;
2378 struct scmi_info *info = handle_to_scmi_info(handle);
2379 struct scmi_revision_info *rev = handle->version;
2380
2381 if (!info->protocols_imp)
2382 return false;
2383
2384 for (i = 0; i < rev->num_protocols; i++)
2385 if (info->protocols_imp[i] == prot_id)
2386 return true;
2387 return false;
2388}
2389
2390struct scmi_protocol_devres {
2391 const struct scmi_handle *handle;
2392 u8 protocol_id;
2393};
2394
2395static void scmi_devm_release_protocol(struct device *dev, void *res)
2396{
2397 struct scmi_protocol_devres *dres = res;
2398
2399 scmi_protocol_release(dres->handle, dres->protocol_id);
2400}
2401
2402static struct scmi_protocol_instance __must_check *
2403scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
2404{
2405 struct scmi_protocol_instance *pi;
2406 struct scmi_protocol_devres *dres;
2407
2408 dres = devres_alloc(scmi_devm_release_protocol,
2409 sizeof(*dres), GFP_KERNEL);
2410 if (!dres)
2411 return ERR_PTR(-ENOMEM);
2412
2413 pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2414 if (IS_ERR(pi)) {
2415 devres_free(dres);
2416 return pi;
2417 }
2418
2419 dres->handle = sdev->handle;
2420 dres->protocol_id = protocol_id;
2421 devres_add(&sdev->dev, dres);
2422
2423 return pi;
2424}
2425
2426/**
2427 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
2428 * @sdev: A reference to an scmi_device whose embedded struct device is to
2429 * be used for devres accounting.
2430 * @protocol_id: The protocol being requested.
2431 * @ph: A pointer reference used to pass back the associated protocol handle.
2432 *
2433 * Get hold of a protocol accounting for its usage, possibly triggering its
2434 * initialization, and returning the protocol specific operations and related
2435 * protocol handle which will be used as first argument in most of the
2436 * protocols operations methods.
2437 * Being a devres based managed method, protocol hold will be automatically
2438 * released, and possibly de-initialized on last user, once the SCMI driver
2439 * owning the scmi_device is unbound from it.
2440 *
2441 * Return: A reference to the requested protocol operations or error.
2442 * Must be checked for errors by caller.
2443 */
2444static const void __must_check *
2445scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2446 struct scmi_protocol_handle **ph)
2447{
2448 struct scmi_protocol_instance *pi;
2449
2450 if (!ph)
2451 return ERR_PTR(-EINVAL);
2452
2453 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2454 if (IS_ERR(pi))
2455 return pi;
2456
2457 *ph = &pi->ph;
2458
2459 return pi->proto->ops;
2460}
2461
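/*
 * Example usage - an illustrative sketch only, with hypothetical protocol
 * and ops names: an SCMI driver gets hold of a protocol, and of its
 * operations, from within its probe routine.
 *
 *	static int scmi_foo_probe(struct scmi_device *sdev)
 *	{
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_foo_proto_ops *foo_ops;
 *
 *		foo_ops = sdev->handle->devm_protocol_get(sdev,
 *							  SCMI_PROTOCOL_FOO,
 *							  &ph);
 *		if (IS_ERR(foo_ops))
 *			return PTR_ERR(foo_ops);
 *		...
 *	}
 */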
2462/**
2463 * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2464 * @sdev: A reference to an scmi_device whose embedded struct device is to
2465 * be used for devres accounting.
2466 * @protocol_id: The protocol being requested.
2467 *
2468 * Get hold of a protocol accounting for its usage, possibly triggering its
2469 * initialization but without getting access to its protocol specific operations
2470 * and handle.
2471 *
2472 * Being a devres based managed method, protocol hold will be automatically
2473 * released, and possibly de-initialized on last user, once the SCMI driver
2474 * owning the scmi_device is unbound from it.
2475 *
2476 * Return: 0 on SUCCESS
2477 */
2478static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2479 u8 protocol_id)
2480{
2481 struct scmi_protocol_instance *pi;
2482
2483 pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2484 if (IS_ERR(pi))
2485 return PTR_ERR(pi);
2486
2487 return 0;
2488}
2489
2490static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2491{
2492 struct scmi_protocol_devres *dres = res;
2493
2494 if (WARN_ON(!dres || !data))
2495 return 0;
2496
2497 return dres->protocol_id == *((u8 *)data);
2498}
2499
2500/**
2501 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
2502 * @sdev: A reference to an scmi_device whose embedded struct device is to
2503 * be used for devres accounting.
2504 * @protocol_id: The protocol being requested.
2505 *
2506 * Explicitly release a protocol hold previously obtained by calling the
2507 * above @scmi_devm_protocol_get.
2508 */
2509static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2510{
2511 int ret;
2512
2513 ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2514 scmi_devm_protocol_match, &protocol_id);
2515 WARN_ON(ret);
2516}
2517
2518/**
2519 * scmi_is_transport_atomic - Method to check if underlying transport for an
2520 * SCMI instance is configured as atomic.
2521 *
2522 * @handle: A reference to the SCMI platform instance.
2523 * @atomic_threshold: An optional return value for the system wide currently
2524 * configured threshold for atomic operations.
2525 *
2526 * Return: True if transport is configured as atomic
2527 */
2528static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2529 unsigned int *atomic_threshold)
2530{
2531 bool ret;
2532 struct scmi_info *info = handle_to_scmi_info(handle);
2533
2534 ret = info->desc->atomic_enabled &&
2535 is_transport_polling_capable(info->desc);
2536 if (ret && atomic_threshold)
2537 *atomic_threshold = info->desc->atomic_threshold;
2538
2539 return ret;
2540}
2541
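/*
 * Example usage - an illustrative sketch only: an SCMI driver can query
 * transport atomicity to decide whether requests issued through this
 * instance may be completed without sleeping.
 *
 *	unsigned int atomic_threshold_us;
 *
 *	if (handle->is_transport_atomic(handle, &atomic_threshold_us))
 *		... atomic requests expected to complete within
 *		    atomic_threshold_us are supported ...
 */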
2542/**
2543 * scmi_handle_get() - Get the SCMI handle for a device
2544 *
2545 * @dev: pointer to device for which we want SCMI handle
2546 *
2547 * NOTE: The function does not track individual clients of the framework;
2548 * such tracking is expected to be maintained by the caller of the SCMI
2549 * protocol library. scmi_handle_put must balance each successful scmi_handle_get.
2550 *
2551 * Return: pointer to handle if successful, NULL on error
2552 */
2553static struct scmi_handle *scmi_handle_get(struct device *dev)
2554{
2555 struct list_head *p;
2556 struct scmi_info *info;
2557 struct scmi_handle *handle = NULL;
2558
2559 mutex_lock(&scmi_list_mutex);
2560 list_for_each(p, &scmi_list) {
2561 info = list_entry(p, struct scmi_info, node);
2562 if (dev->parent == info->dev) {
2563 info->users++;
2564 handle = &info->handle;
2565 break;
2566 }
2567 }
2568 mutex_unlock(&scmi_list_mutex);
2569
2570 return handle;
2571}
2572
2573/**
2574 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
2575 *
2576 * @handle: handle acquired by scmi_handle_get
2577 *
2578 * NOTE: The function does not track individual clients of the framework;
2579 * such tracking is expected to be maintained by the caller of the SCMI
2580 * protocol library. scmi_handle_put must balance each successful scmi_handle_get.
2581 *
2582 * Return: 0 if successfully released,
2583 * -EINVAL if a NULL handle was passed.
2584 */
2585static int scmi_handle_put(const struct scmi_handle *handle)
2586{
2587 struct scmi_info *info;
2588
2589 if (!handle)
2590 return -EINVAL;
2591
2592 info = handle_to_scmi_info(handle);
2593 mutex_lock(&scmi_list_mutex);
2594 if (!WARN_ON(!info->users))
2595 info->users--;
2596 mutex_unlock(&scmi_list_mutex);
2597
2598 return 0;
2599}
2600
2601static void scmi_device_link_add(struct device *consumer,
2602 struct device *supplier)
2603{
2604 struct device_link *link;
2605
2606 link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2607
2608 WARN_ON(!link);
2609}
2610
2611static void scmi_set_handle(struct scmi_device *scmi_dev)
2612{
2613 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2614 if (scmi_dev->handle)
2615 scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2616}
2617
2618static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2619 struct scmi_xfers_info *info)
2620{
2621 int i;
2622 struct scmi_xfer *xfer;
2623 struct device *dev = sinfo->dev;
2624 const struct scmi_desc *desc = sinfo->desc;
2625
2626 /* Pre-allocated messages, no more than what hdr.seq can support */
2627 if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2628 dev_err(dev,
2629 "Invalid maximum messages %d, not in range [1 - %lu]\n",
2630 info->max_msg, MSG_TOKEN_MAX);
2631 return -EINVAL;
2632 }
2633
2634 hash_init(info->pending_xfers);
2635
2636 /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2637 info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2638 GFP_KERNEL);
2639 if (!info->xfer_alloc_table)
2640 return -ENOMEM;
2641
2642 /*
2643 * Preallocate a number of xfers equal to max inflight messages,
2644 * pre-initialize the buffer pointer to pre-allocated buffers and
2645 * attach all of them to the free list
2646 */
2647 INIT_HLIST_HEAD(&info->free_xfers);
2648 for (i = 0; i < info->max_msg; i++) {
2649 xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2650 if (!xfer)
2651 return -ENOMEM;
2652
2653 xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2654 GFP_KERNEL);
2655 if (!xfer->rx.buf)
2656 return -ENOMEM;
2657
2658 xfer->tx.buf = xfer->rx.buf;
2659 init_completion(&xfer->done);
2660 spin_lock_init(&xfer->lock);
2661
2662 /* Add initialized xfer to the free list */
2663 hlist_add_head(&xfer->node, &info->free_xfers);
2664 }
2665
2666 spin_lock_init(&info->xfer_lock);
2667
2668 return 0;
2669}
2670
2671static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2672{
2673 const struct scmi_desc *desc = sinfo->desc;
2674
2675 if (!desc->ops->get_max_msg) {
2676 sinfo->tx_minfo.max_msg = desc->max_msg;
2677 sinfo->rx_minfo.max_msg = desc->max_msg;
2678 } else {
2679 struct scmi_chan_info *base_cinfo;
2680
2681 base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2682 if (!base_cinfo)
2683 return -EINVAL;
2684 sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2685
2686 /* RX channel is optional and can be skipped */
2687 base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2688 if (base_cinfo)
2689 sinfo->rx_minfo.max_msg =
2690 desc->ops->get_max_msg(base_cinfo);
2691 }
2692
2693 return 0;
2694}
2695
2696static int scmi_xfer_info_init(struct scmi_info *sinfo)
2697{
2698 int ret;
2699
2700 ret = scmi_channels_max_msg_configure(sinfo);
2701 if (ret)
2702 return ret;
2703
2704 ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2705 if (!ret && !idr_is_empty(&sinfo->rx_idr))
2706 ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2707
2708 return ret;
2709}
2710
2711static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2712 int prot_id, bool tx)
2713{
2714 int ret, idx;
2715 char name[32];
2716 struct scmi_chan_info *cinfo;
2717 struct idr *idr;
2718 struct scmi_device *tdev = NULL;
2719
2720 /* Transmit channel is first entry i.e. index 0 */
2721 idx = tx ? 0 : 1;
2722 idr = tx ? &info->tx_idr : &info->rx_idr;
2723
2724 if (!info->desc->ops->chan_available(of_node, idx)) {
2725 cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2726 if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2727 return -EINVAL;
2728 goto idr_alloc;
2729 }
2730
2731 cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2732 if (!cinfo)
2733 return -ENOMEM;
2734
2735 cinfo->is_p2a = !tx;
2736 cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2737 cinfo->max_msg_size = info->desc->max_msg_size;
2738 cinfo->no_completion_irq = info->desc->no_completion_irq;
2739
2740 /* Create a unique name for this transport device */
2741 snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2742 idx ? "rx" : "tx", prot_id);
2743 /* Create a uniquely named, dedicated transport device for this chan */
2744 tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2745 if (!tdev) {
2746 dev_err(info->dev,
2747 "failed to create transport device (%s)\n", name);
2748 devm_kfree(info->dev, cinfo);
2749 return -EINVAL;
2750 }
2751 of_node_get(of_node);
2752
2753 cinfo->id = prot_id;
2754 cinfo->dev = &tdev->dev;
2755 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2756 if (ret) {
2757 of_node_put(of_node);
2758 scmi_device_destroy(info->dev, prot_id, name);
2759 devm_kfree(info->dev, cinfo);
2760 return ret;
2761 }
2762
2763 if (tx && is_polling_required(cinfo, info->desc)) {
2764 if (is_transport_polling_capable(info->desc))
2765 dev_info(&tdev->dev,
2766 "Enabled polling mode TX channel - prot_id:%d\n",
2767 prot_id);
2768 else
2769 dev_warn(&tdev->dev,
2770 "Polling mode NOT supported by transport.\n");
2771 }
2772
2773idr_alloc:
2774 ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2775 if (ret != prot_id) {
2776 dev_err(info->dev,
2777 "unable to allocate SCMI idr slot err %d\n", ret);
2778 /* Destroy channel and device only if created by this call. */
2779 if (tdev) {
2780 of_node_put(of_node);
2781 scmi_device_destroy(info->dev, prot_id, name);
2782 devm_kfree(info->dev, cinfo);
2783 }
2784 return ret;
2785 }
2786
2787 cinfo->handle = &info->handle;
2788 return 0;
2789}
2790
2791static inline int
2792scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2793 int prot_id)
2794{
2795 int ret = scmi_chan_setup(info, of_node, prot_id, true);
2796
2797 if (!ret) {
2798 /* Rx is optional, report only memory errors */
2799 ret = scmi_chan_setup(info, of_node, prot_id, false);
2800 if (ret && ret != -ENOMEM)
2801 ret = 0;
2802 }
2803
2804 if (ret)
2805 dev_err(info->dev,
2806 "failed to setup channel for protocol:0x%X\n", prot_id);
2807
2808 return ret;
2809}
2810
2811/**
2812 * scmi_channels_setup - Helper to initialize all required channels
2813 *
2814 * @info: The SCMI instance descriptor.
2815 *
2816 * Initialize all the channels described in the DT against the underlying
2817 * configured transport using custom defined dedicated devices instead of
2818 * borrowing devices from the SCMI drivers; this way channels are initialized
2819 * upfront during core SCMI stack probing and are no longer coupled with SCMI
2820 * devices used by SCMI drivers.
2821 *
2822 * Note that, even though a pair of TX/RX channels is associated to each
2823 * protocol defined in the DT, a distinct freshly initialized channel is
2824 * created only if the DT node for the protocol at hand describes a dedicated
2825 * channel: in all the other cases the common BASE protocol channel is reused.
2826 *
2827 * Return: 0 on Success
2828 */
2829static int scmi_channels_setup(struct scmi_info *info)
2830{
2831 int ret;
2832 struct device_node *top_np = info->dev->of_node;
2833
2834 /* Initialize a common generic channel at first */
2835 ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2836 if (ret)
2837 return ret;
2838
2839 for_each_available_child_of_node_scoped(top_np, child) {
2840 u32 prot_id;
2841
2842 if (of_property_read_u32(child, "reg", &prot_id))
2843 continue;
2844
2845 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2846 dev_err(info->dev,
2847 "Out of range protocol %d\n", prot_id);
2848
2849 ret = scmi_txrx_setup(info, child, prot_id);
2850 if (ret)
2851 return ret;
2852 }
2853
2854 return 0;
2855}
2856
2857static int scmi_chan_destroy(int id, void *p, void *idr)
2858{
2859 struct scmi_chan_info *cinfo = p;
2860
2861 if (cinfo->dev) {
2862 struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2863 struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2864
2865 of_node_put(cinfo->dev->of_node);
2866 scmi_device_destroy(info->dev, id, sdev->name);
2867 cinfo->dev = NULL;
2868 }
2869
2870 idr_remove(idr, id);
2871
2872 return 0;
2873}
2874
2875static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2876{
2877 /* At first free all channels at the transport layer ... */
2878 idr_for_each(idr, info->desc->ops->chan_free, idr);
2879
2880 /* ...then destroy all underlying devices */
2881 idr_for_each(idr, scmi_chan_destroy, idr);
2882
2883 idr_destroy(idr);
2884}
2885
2886static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2887{
2888 scmi_cleanup_channels(info, &info->tx_idr);
2889
2890 scmi_cleanup_channels(info, &info->rx_idr);
2891}
2892
2893static int scmi_bus_notifier(struct notifier_block *nb,
2894 unsigned long action, void *data)
2895{
2896 struct scmi_info *info = bus_nb_to_scmi_info(nb);
2897 struct scmi_device *sdev = to_scmi_dev(data);
2898
2899 /* Skip devices of different SCMI instances */
2900 if (sdev->dev.parent != info->dev)
2901 return NOTIFY_DONE;
2902
2903 switch (action) {
2904 case BUS_NOTIFY_BIND_DRIVER:
2905 /* setup handle now as the transport is ready */
2906 scmi_set_handle(sdev);
2907 break;
2908 case BUS_NOTIFY_UNBOUND_DRIVER:
2909 scmi_handle_put(sdev->handle);
2910 sdev->handle = NULL;
2911 break;
2912 default:
2913 return NOTIFY_DONE;
2914 }
2915
2916 dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2917 sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2918 "about to be BOUND." : "UNBOUND.");
2919
2920 return NOTIFY_OK;
2921}
2922
2923static int scmi_device_request_notifier(struct notifier_block *nb,
2924 unsigned long action, void *data)
2925{
2926 struct device_node *np;
2927 struct scmi_device_id *id_table = data;
2928 struct scmi_info *info = req_nb_to_scmi_info(nb);
2929
2930 np = idr_find(&info->active_protocols, id_table->protocol_id);
2931 if (!np)
2932 return NOTIFY_DONE;
2933
2934 dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2935 action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2936 id_table->name, id_table->protocol_id);
2937
2938 switch (action) {
2939 case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2940 scmi_create_protocol_devices(np, info, id_table->protocol_id,
2941 id_table->name);
2942 break;
2943 case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2944 scmi_destroy_protocol_devices(info, id_table->protocol_id,
2945 id_table->name);
2946 break;
2947 default:
2948 return NOTIFY_DONE;
2949 }
2950
2951 return NOTIFY_OK;
2952}
2953
2954static const char * const dbg_counter_strs[] = {
2955 "sent_ok",
2956 "sent_fail",
2957 "sent_fail_polling_unsupported",
2958 "sent_fail_channel_not_found",
2959 "response_ok",
2960 "notification_ok",
2961 "delayed_response_ok",
2962 "xfers_response_timeout",
2963 "xfers_response_polled_timeout",
2964 "response_polled_ok",
2965 "err_msg_unexpected",
2966 "err_msg_invalid",
2967 "err_msg_nomem",
2968 "err_protocol",
2969 "xfers_inflight",
2970};
2971
2972static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
2973 size_t count, loff_t *ppos)
2974{
2975 struct scmi_debug_info *dbg = filp->private_data;
2976
2977 for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++)
2978 atomic_set(&dbg->counters[i], 0);
2979
2980 return count;
2981}
2982
2983static const struct file_operations fops_reset_counts = {
2984 .owner = THIS_MODULE,
2985 .open = simple_open,
2986 .write = reset_all_on_write,
2987};
2988
2989static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg,
2990 struct dentry *trans)
2991{
2992 struct dentry *counters;
2993 int idx;
2994
2995 counters = debugfs_create_dir("counters", trans);
2996
2997 for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++)
2998 debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters,
2999 &dbg->counters[idx]);
3000
3001 debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts);
3002}
3003
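/*
 * With counters enabled the resulting debugfs layout looks roughly like the
 * following, where the instance id (0 here) is illustrative:
 *
 *	/sys/kernel/debug/scmi/0/transport/counters/sent_ok
 *	/sys/kernel/debug/scmi/0/transport/counters/response_ok
 *	...
 *	/sys/kernel/debug/scmi/0/transport/counters/reset
 *
 * Writing anything to the 'reset' entry zeroes all the counters at once.
 */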
3004static void scmi_debugfs_common_cleanup(void *d)
3005{
3006 struct scmi_debug_info *dbg = d;
3007
3008 if (!dbg)
3009 return;
3010
3011 debugfs_remove_recursive(dbg->top_dentry);
3012 kfree(dbg->name);
3013 kfree(dbg->type);
3014}
3015
3016static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
3017{
3018 char top_dir[16];
3019 struct dentry *trans, *top_dentry;
3020 struct scmi_debug_info *dbg;
3021 const char *c_ptr = NULL;
3022
3023 dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
3024 if (!dbg)
3025 return NULL;
3026
3027 dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
3028 if (!dbg->name) {
3029 devm_kfree(info->dev, dbg);
3030 return NULL;
3031 }
3032
3033 of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
3034 dbg->type = kstrdup(c_ptr, GFP_KERNEL);
3035 if (!dbg->type) {
3036 kfree(dbg->name);
3037 devm_kfree(info->dev, dbg);
3038 return NULL;
3039 }
3040
3041 snprintf(top_dir, 16, "%d", info->id);
3042 top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
3043 trans = debugfs_create_dir("transport", top_dentry);
3044
3045 dbg->is_atomic = info->desc->atomic_enabled &&
3046 is_transport_polling_capable(info->desc);
3047
3048 debugfs_create_str("instance_name", 0400, top_dentry,
3049 (char **)&dbg->name);
3050
3051 debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
3052 (u32 *)&info->desc->atomic_threshold);
3053
3054 debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
3055
3056 debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
3057
3058 debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
3059 (u32 *)&info->desc->max_rx_timeout_ms);
3060
3061 debugfs_create_u32("max_msg_size", 0400, trans,
3062 (u32 *)&info->desc->max_msg_size);
3063
3064 debugfs_create_u32("tx_max_msg", 0400, trans,
3065 (u32 *)&info->tx_minfo.max_msg);
3066
3067 debugfs_create_u32("rx_max_msg", 0400, trans,
3068 (u32 *)&info->rx_minfo.max_msg);
3069
3070 if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
3071 scmi_debugfs_counters_setup(dbg, trans);
3072
3073 dbg->top_dentry = top_dentry;
3074
3075 if (devm_add_action_or_reset(info->dev,
3076 scmi_debugfs_common_cleanup, dbg))
3077 return NULL;
3078
3079 return dbg;
3080}
3081
3082static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
3083{
3084 int id, num_chans = 0, ret = 0;
3085 struct scmi_chan_info *cinfo;
3086 u8 channels[SCMI_MAX_CHANNELS] = {};
3087 DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
3088
3089 /* Enumerate all channels to collect their ids */
3090 idr_for_each_entry(&info->tx_idr, cinfo, id) {
3091 /*
3092 * Cannot happen, but be defensive.
3093 * Zero as num_chans is ok, warn and carry on.
3094 */
3095 if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
3096 dev_warn(info->dev,
3097 "SCMI RAW - Error enumerating channels\n");
3098 break;
3099 }
3100
3101 if (!test_bit(cinfo->id, protos)) {
3102 channels[num_chans++] = cinfo->id;
3103 set_bit(cinfo->id, protos);
3104 }
3105 }
3106
3107 info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
3108 info->id, channels, num_chans,
3109 info->desc, info->tx_minfo.max_msg);
3110 if (IS_ERR(info->raw)) {
3111 dev_err(info->dev, "Failed to initialize SCMI RAW Mode!\n");
3112 ret = PTR_ERR(info->raw);
3113 info->raw = NULL;
3114 }
3115
3116 return ret;
3117}
3118
3119static const struct scmi_desc *scmi_transport_setup(struct device *dev)
3120{
3121 struct scmi_transport *trans;
3122 int ret;
3123
3124 trans = dev_get_platdata(dev);
3125 if (!trans || !trans->supplier || !trans->core_ops)
3126 return NULL;
3127
3128 if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
3129 dev_err(dev,
3130 "Adding link to supplier transport device failed\n");
3131 return NULL;
3132 }
3133
3134 /* Provide core transport ops */
3135 *trans->core_ops = &scmi_trans_core_ops;
3136
3137 dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
3138
3139 ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
3140 &trans->desc.max_rx_timeout_ms);
3141 if (ret && ret != -EINVAL)
3142 dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
3143
3144 ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
3145 &trans->desc.max_msg_size);
3146 if (ret && ret != -EINVAL)
3147 dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
3148
3149 ret = of_property_read_u32(dev->of_node, "arm,max-msg",
3150 &trans->desc.max_msg);
3151 if (ret && ret != -EINVAL)
3152 dev_err(dev, "Malformed arm,max-msg DT property.\n");
3153
3154 trans->desc.no_completion_irq = of_property_read_bool(dev->of_node,
3155 "arm,no-completion-irq");
3156
3157 dev_info(dev,
3158 "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
3159 trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
3160 trans->desc.max_msg);
3161
3162 /* System wide atomic threshold for atomic ops .. if any */
3163 if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
3164 &trans->desc.atomic_threshold))
3165 dev_info(dev,
3166 "SCMI System wide atomic threshold set to %u us\n",
3167 trans->desc.atomic_threshold);
3168
3169 return &trans->desc;
3170}
3171
3172static void scmi_enable_matching_quirks(struct scmi_info *info)
3173{
3174 struct scmi_revision_info *rev = &info->version;
3175
3176 dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
3177 rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
3178
3179 /* Enable applicable quirks */
3180 scmi_quirks_enable(info->dev, rev->vendor_id,
3181 rev->sub_vendor_id, rev->impl_ver);
3182}
3183
3184static int scmi_probe(struct platform_device *pdev)
3185{
3186 int ret;
3187 char *err_str = "probe failure\n";
3188 struct scmi_handle *handle;
3189 const struct scmi_desc *desc;
3190 struct scmi_info *info;
3191 bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
3192 struct device *dev = &pdev->dev;
3193 struct device_node *child, *np = dev->of_node;
3194
3195 desc = scmi_transport_setup(dev);
3196 if (!desc) {
3197 err_str = "transport invalid\n";
3198 ret = -EINVAL;
3199 goto out_err;
3200 }
3201
3202 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3203 if (!info)
3204 return -ENOMEM;
3205
3206 info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
3207 if (info->id < 0)
3208 return info->id;
3209
3210 info->dev = dev;
3211 info->desc = desc;
3212 info->bus_nb.notifier_call = scmi_bus_notifier;
3213 info->dev_req_nb.notifier_call = scmi_device_request_notifier;
3214 INIT_LIST_HEAD(&info->node);
3215 idr_init(&info->protocols);
3216 mutex_init(&info->protocols_mtx);
3217 idr_init(&info->active_protocols);
3218 mutex_init(&info->devreq_mtx);
3219
3220 platform_set_drvdata(pdev, info);
3221 idr_init(&info->tx_idr);
3222 idr_init(&info->rx_idr);
3223
3224 handle = &info->handle;
3225 handle->dev = info->dev;
3226 handle->version = &info->version;
3227 handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
3228 handle->devm_protocol_get = scmi_devm_protocol_get;
3229 handle->devm_protocol_put = scmi_devm_protocol_put;
3230 handle->is_transport_atomic = scmi_is_transport_atomic;
3231
3232 /* Setup all channels described in the DT at first */
3233 ret = scmi_channels_setup(info);
3234 if (ret) {
3235 err_str = "failed to setup channels\n";
3236 goto clear_ida;
3237 }
3238
3239 ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
3240 if (ret) {
3241 err_str = "failed to register bus notifier\n";
3242 goto clear_txrx_setup;
3243 }
3244
3245 ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
3246 &info->dev_req_nb);
3247 if (ret) {
3248 err_str = "failed to register device notifier\n";
3249 goto clear_bus_notifier;
3250 }
3251
3252 ret = scmi_xfer_info_init(info);
3253 if (ret) {
3254 err_str = "failed to init xfers pool\n";
3255 goto clear_dev_req_notifier;
3256 }
3257
3258 if (scmi_top_dentry) {
3259 info->dbg = scmi_debugfs_common_setup(info);
3260 if (!info->dbg)
3261 dev_warn(dev, "Failed to setup SCMI debugfs.\n");
3262
3263 if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
3264 ret = scmi_debugfs_raw_mode_setup(info);
3265 if (!coex) {
3266 if (ret)
3267 goto clear_dev_req_notifier;
3268
3269 /* Bail out anyway when coex disabled. */
3270 return 0;
3271 }
3272
3273 /* Coex enabled, carry on in any case. */
3274 dev_info(dev, "SCMI RAW Mode COEX enabled!\n");
3275 }
3276 }
3277
3278 if (scmi_notification_init(handle))
3279 dev_err(dev, "SCMI Notifications NOT available.\n");
3280
3281 if (info->desc->atomic_enabled &&
3282 !is_transport_polling_capable(info->desc))
3283 dev_err(dev,
3284 "Transport is not polling capable. Atomic mode not supported.\n");
3285
3286 /*
3287 * Trigger SCMI Base protocol initialization.
3288 * It's mandatory and won't be ever released/deinit until the
3289 * SCMI stack is shutdown/unloaded as a whole.
3290 */
3291 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
3292 if (ret) {
3293 err_str = "unable to communicate with SCMI\n";
3294 if (coex) {
3295 dev_err(dev, "%s", err_str);
3296 return 0;
3297 }
3298 goto notification_exit;
3299 }
3300
3301 mutex_lock(&scmi_list_mutex);
3302 list_add_tail(&info->node, &scmi_list);
3303 mutex_unlock(&scmi_list_mutex);
3304
3305 scmi_enable_matching_quirks(info);
3306
3307 for_each_available_child_of_node(np, child) {
3308 u32 prot_id;
3309
3310 if (of_property_read_u32(child, "reg", &prot_id))
3311 continue;
3312
3313 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
3314 dev_err(dev, "Out of range protocol %d\n", prot_id);
3315
3316 if (!scmi_is_protocol_implemented(handle, prot_id)) {
3317 dev_err(dev, "SCMI protocol %d not implemented\n",
3318 prot_id);
3319 continue;
3320 }
3321
3322 /*
3323 * Save this valid DT protocol descriptor amongst
3324 * @active_protocols for this SCMI instance.
3325 */
3326 ret = idr_alloc(&info->active_protocols, child,
3327 prot_id, prot_id + 1, GFP_KERNEL);
3328 if (ret != prot_id) {
3329 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
3330 prot_id);
3331 continue;
3332 }
3333
3334 of_node_get(child);
3335 scmi_create_protocol_devices(child, info, prot_id, NULL);
3336 }
3337
3338 return 0;
3339
3340notification_exit:
3341 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3342 scmi_raw_mode_cleanup(info->raw);
3343 scmi_notification_exit(&info->handle);
3344clear_dev_req_notifier:
3345 blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3346 &info->dev_req_nb);
3347clear_bus_notifier:
3348 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3349clear_txrx_setup:
3350 scmi_cleanup_txrx_channels(info);
3351clear_ida:
3352 ida_free(&scmi_id, info->id);
3353
3354out_err:
3355 return dev_err_probe(dev, ret, "%s", err_str);
3356}
3357
3358static void scmi_remove(struct platform_device *pdev)
3359{
3360 int id;
3361 struct scmi_info *info = platform_get_drvdata(pdev);
3362 struct device_node *child;
3363
3364 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3365 scmi_raw_mode_cleanup(info->raw);
3366
3367 mutex_lock(&scmi_list_mutex);
3368 if (info->users)
3369 dev_warn(&pdev->dev,
3370 "Still active SCMI users will be forcibly unbound.\n");
3371 list_del(&info->node);
3372 mutex_unlock(&scmi_list_mutex);
3373
3374 scmi_notification_exit(&info->handle);
3375
3376 mutex_lock(&info->protocols_mtx);
3377 idr_destroy(&info->protocols);
3378 mutex_unlock(&info->protocols_mtx);
3379
3380 idr_for_each_entry(&info->active_protocols, child, id)
3381 of_node_put(child);
3382 idr_destroy(&info->active_protocols);
3383
3384 blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3385 &info->dev_req_nb);
3386 bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3387
3388 /* Safe to free channels since no more users */
3389 scmi_cleanup_txrx_channels(info);
3390
3391 ida_free(&scmi_id, info->id);
3392}
3393
3394static ssize_t protocol_version_show(struct device *dev,
3395 struct device_attribute *attr, char *buf)
3396{
3397 struct scmi_info *info = dev_get_drvdata(dev);
3398
3399 return sprintf(buf, "%u.%u\n", info->version.major_ver,
3400 info->version.minor_ver);
3401}
3402static DEVICE_ATTR_RO(protocol_version);
3403
3404static ssize_t firmware_version_show(struct device *dev,
3405 struct device_attribute *attr, char *buf)
3406{
3407 struct scmi_info *info = dev_get_drvdata(dev);
3408
3409 return sprintf(buf, "0x%x\n", info->version.impl_ver);
3410}
3411static DEVICE_ATTR_RO(firmware_version);
3412
3413static ssize_t vendor_id_show(struct device *dev,
3414 struct device_attribute *attr, char *buf)
3415{
3416 struct scmi_info *info = dev_get_drvdata(dev);
3417
3418 return sprintf(buf, "%s\n", info->version.vendor_id);
3419}
3420static DEVICE_ATTR_RO(vendor_id);
3421
3422static ssize_t sub_vendor_id_show(struct device *dev,
3423 struct device_attribute *attr, char *buf)
3424{
3425 struct scmi_info *info = dev_get_drvdata(dev);
3426
3427 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
3428}
3429static DEVICE_ATTR_RO(sub_vendor_id);
3430
3431static struct attribute *versions_attrs[] = {
3432 &dev_attr_firmware_version.attr,
3433 &dev_attr_protocol_version.attr,
3434 &dev_attr_vendor_id.attr,
3435 &dev_attr_sub_vendor_id.attr,
3436 NULL,
3437};
3438ATTRIBUTE_GROUPS(versions);
3439
3440static struct platform_driver scmi_driver = {
3441 .driver = {
3442 .name = "arm-scmi",
3443 .suppress_bind_attrs = true,
3444 .dev_groups = versions_groups,
3445 },
3446 .probe = scmi_probe,
3447 .remove = scmi_remove,
3448};
3449
3450static struct dentry *scmi_debugfs_init(void)
3451{
3452 struct dentry *d;
3453
3454 d = debugfs_create_dir("scmi", NULL);
3455 if (IS_ERR(d)) {
3456 pr_err("Could NOT create SCMI top dentry.\n");
3457 return NULL;
3458 }
3459
3460 return d;
3461}
3462
3463int scmi_inflight_count(const struct scmi_handle *handle)
3464{
3465 if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
3466 struct scmi_info *info = handle_to_scmi_info(handle);
3467
3468 if (!info->dbg)
3469 return 0;
3470
3471 return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
3472 } else {
3473 return 0;
3474 }
3475}
3476
3477static int __init scmi_driver_init(void)
3478{
3479 scmi_quirks_initialize();
3480
3481 /* Bail out if no SCMI transport was configured */
3482 if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3483 return -EINVAL;
3484
3485 if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM))
3486 scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get();
3487
3488 if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG))
3489 scmi_trans_core_ops.msg = scmi_message_operations_get();
3490
3491 if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3492 scmi_top_dentry = scmi_debugfs_init();
3493
3494 scmi_base_register();
3495
3496 scmi_clock_register();
3497 scmi_perf_register();
3498 scmi_power_register();
3499 scmi_reset_register();
3500 scmi_sensors_register();
3501 scmi_voltage_register();
3502 scmi_system_register();
3503 scmi_powercap_register();
3504 scmi_pinctrl_register();
3505
3506 return platform_driver_register(&scmi_driver);
3507}
3508module_init(scmi_driver_init);
3509
3510static void __exit scmi_driver_exit(void)
3511{
3512 scmi_base_unregister();
3513
3514 scmi_clock_unregister();
3515 scmi_perf_unregister();
3516 scmi_power_unregister();
3517 scmi_reset_unregister();
3518 scmi_sensors_unregister();
3519 scmi_voltage_unregister();
3520 scmi_system_unregister();
3521 scmi_powercap_unregister();
3522 scmi_pinctrl_unregister();
3523
3524 platform_driver_unregister(&scmi_driver);
3525
3526 debugfs_remove_recursive(scmi_top_dentry);
3527}
3528module_exit(scmi_driver_exit);
3529
3530MODULE_ALIAS("platform:arm-scmi");
3531MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
3532MODULE_DESCRIPTION("ARM SCMI protocol driver");
3533MODULE_LICENSE("GPL v2");