Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
4 */
5
6#include <linux/clk/tegra.h>
7#include <linux/genalloc.h>
8#include <linux/mailbox_client.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_platform.h>
12#include <linux/platform_device.h>
13#include <linux/pm.h>
14#include <linux/semaphore.h>
15#include <linux/sched/clock.h>
16
17#include <soc/tegra/bpmp.h>
18#include <soc/tegra/bpmp-abi.h>
19#include <soc/tegra/ivc.h>
20
21#include "bpmp-private.h"
22
23#define MSG_ACK BIT(0)
24#define MSG_RING BIT(1)
25#define TAG_SZ 32
26
27static inline const struct tegra_bpmp_ops *
28channel_to_ops(struct tegra_bpmp_channel *channel)
29{
30 struct tegra_bpmp *bpmp = channel->bpmp;
31
32 return bpmp->soc->ops;
33}
34
35struct tegra_bpmp *tegra_bpmp_get_with_id(struct device *dev, unsigned int *id)
36{
37 struct platform_device *pdev;
38 struct of_phandle_args args;
39 struct tegra_bpmp *bpmp;
40 int err;
41
42 err = __of_parse_phandle_with_args(dev->of_node, "nvidia,bpmp", NULL,
43 1, 0, &args);
44 if (err < 0)
45 return ERR_PTR(err);
46
47 pdev = of_find_device_by_node(args.np);
48 if (!pdev) {
49 bpmp = ERR_PTR(-ENODEV);
50 goto put;
51 }
52
53 bpmp = platform_get_drvdata(pdev);
54 if (!bpmp) {
55 bpmp = ERR_PTR(-EPROBE_DEFER);
56 put_device(&pdev->dev);
57 goto put;
58 }
59
60 if (id)
61 *id = args.args[0];
62
63put:
64 of_node_put(args.np);
65 return bpmp;
66}
67EXPORT_SYMBOL_GPL(tegra_bpmp_get_with_id);
68
69struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
70{
71 struct platform_device *pdev;
72 struct tegra_bpmp *bpmp;
73 struct device_node *np;
74
75 np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
76 if (!np)
77 return ERR_PTR(-ENOENT);
78
79 pdev = of_find_device_by_node(np);
80 if (!pdev) {
81 bpmp = ERR_PTR(-ENODEV);
82 goto put;
83 }
84
85 bpmp = platform_get_drvdata(pdev);
86 if (!bpmp) {
87 bpmp = ERR_PTR(-EPROBE_DEFER);
88 put_device(&pdev->dev);
89 goto put;
90 }
91
92put:
93 of_node_put(np);
94 return bpmp;
95}
96EXPORT_SYMBOL_GPL(tegra_bpmp_get);
97
98void tegra_bpmp_put(struct tegra_bpmp *bpmp)
99{
100 if (bpmp)
101 put_device(bpmp->dev);
102}
103EXPORT_SYMBOL_GPL(tegra_bpmp_put);
104
105static int
106tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
107{
108 struct tegra_bpmp *bpmp = channel->bpmp;
109 unsigned int count;
110 int index;
111
112 count = bpmp->soc->channels.thread.count;
113
114 index = channel - channel->bpmp->threaded_channels;
115 if (index < 0 || index >= count)
116 return -EINVAL;
117
118 return index;
119}
120
121static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
122{
123 return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
124 (msg->rx.size <= MSG_DATA_MIN_SZ) &&
125 (msg->tx.size == 0 || msg->tx.data) &&
126 (msg->rx.size == 0 || msg->rx.data);
127}
128
129static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
130{
131 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
132
133 return ops->is_response_ready(channel);
134}
135
136static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
137{
138 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
139
140 return ops->is_request_ready(channel);
141}
142
143static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
144{
145 unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
146 ktime_t end;
147
148 end = ktime_add_us(ktime_get(), timeout);
149
150 do {
151 if (tegra_bpmp_is_response_ready(channel))
152 return 0;
153 } while (ktime_before(ktime_get(), end));
154
155 return -ETIMEDOUT;
156}
157
158static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
159{
160 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
161
162 return ops->ack_response(channel);
163}
164
165static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
166{
167 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
168
169 return ops->ack_request(channel);
170}
171
172static bool
173tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
174{
175 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
176
177 return ops->is_request_channel_free(channel);
178}
179
180static bool
181tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
182{
183 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
184
185 return ops->is_response_channel_free(channel);
186}
187
188static int
189tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
190{
191 unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
192 ktime_t start, now;
193
194 start = ns_to_ktime(local_clock());
195
196 do {
197 if (tegra_bpmp_is_request_channel_free(channel))
198 return 0;
199
200 now = ns_to_ktime(local_clock());
201 } while (ktime_us_delta(now, start) < timeout);
202
203 return -ETIMEDOUT;
204}
205
206static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
207{
208 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
209
210 return ops->post_request(channel);
211}
212
213static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
214{
215 const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
216
217 return ops->post_response(channel);
218}
219
220static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
221{
222 return bpmp->soc->ops->ring_doorbell(bpmp);
223}
224
/*
 * Copy a response payload out of @channel's inbound area, acknowledge the
 * response, and store the MRQ return code (inbound "code" field) in *@ret.
 *
 * Returns 0 on success or the negative error from the ack.
 *
 * NOTE(review): the code field is read after the response is acked;
 * presumably the inbound area stays readable until the next message
 * arrives — confirm against the transport layer's contract.
 */
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		tegra_bpmp_mb_read(data, &channel->ib, size);

	err = tegra_bpmp_ack_response(channel);
	if (err < 0)
		return err;

	*ret = tegra_bpmp_mb_read_field(&channel->ib, code);

	return 0;
}
241
/*
 * Read the response on a threaded channel and release the channel.
 *
 * The payload copy, response ack and clearing of the channel's "allocated"
 * bit happen together under bpmp->lock so the slot cannot be reallocated
 * in between. The threaded counting semaphore is released on every path,
 * matching the down_timeout() taken in tegra_bpmp_write_threaded().
 */
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}
266
/*
 * Fill the outbound area of @channel with an MRQ request and post it:
 * @mrq goes into the "code" field, @flags (MSG_ACK/MSG_RING) into the
 * "flags" field, and @data/@size is an optional payload. Returns the
 * result of posting the request to the transport.
 */
static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	tegra_bpmp_mb_write_field(&channel->ob, code, mrq);
	tegra_bpmp_mb_write_field(&channel->ob, flags, flags);

	if (data && size > 0)
		tegra_bpmp_mb_write(&channel->ob, data, size);

	return tegra_bpmp_post_request(channel);
}
279
/*
 * Claim a free threaded channel and write an MRQ request to it.
 *
 * Flow: take the counting semaphore (sized to the number of threaded
 * channels, bounded by the thread timeout), then, under bpmp->lock, pick
 * the first unallocated channel, mark it allocated, write the request with
 * MSG_ACK | MSG_RING, and finally mark it busy so tegra_bpmp_handle_rx()
 * will signal its completion when the response arrives.
 *
 * On failure the allocated bit and the semaphore are rolled back.
 * Returns the claimed channel or an ERR_PTR().
 */
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	if (!tegra_bpmp_is_request_channel_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}
330
331static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
332 unsigned int mrq, unsigned long flags,
333 const void *data, size_t size)
334{
335 int err;
336
337 err = tegra_bpmp_wait_request_channel_free(channel);
338 if (err < 0)
339 return err;
340
341 return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
342}
343
344static int __maybe_unused tegra_bpmp_resume(struct device *dev);
345
/**
 * tegra_bpmp_transfer_atomic() - send an MRQ and busy-wait for the reply.
 * @bpmp: BPMP context
 * @msg: message descriptor (MRQ, tx/rx buffers and sizes, flags)
 *
 * Must be called with interrupts disabled (enforced by WARN_ON). Uses the
 * dedicated TX channel, serialized by atomic_tx_lock, and polls for the
 * response instead of sleeping. While suspended, transfers fail with
 * -EAGAIN unless the caller passes TEGRA_BPMP_MESSAGE_RESET, in which case
 * the IPC channels are re-initialized via tegra_bpmp_resume() first.
 *
 * Return: 0 on success (with @msg->rx filled in) or a negative error.
 */
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	if (bpmp->suspended) {
		/* Reset BPMP IPC channels during resume based on flags passed */
		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
			tegra_bpmp_resume(bpmp->dev);
		else
			return -EAGAIN;
	}

	channel = bpmp->tx_channel;

	/* only the write needs serializing; the poll below is lock-free */
	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_wait_response(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
391
/**
 * tegra_bpmp_transfer() - send an MRQ and sleep until the reply arrives.
 * @bpmp: BPMP context
 * @msg: message descriptor (MRQ, tx/rx buffers and sizes, flags)
 *
 * Must be called from sleeping context with interrupts enabled (enforced
 * by WARN_ON). Claims a threaded channel, rings the doorbell and waits on
 * the channel's completion, which tegra_bpmp_handle_rx() signals when the
 * response arrives. While suspended, fails with -EAGAIN unless the caller
 * passes TEGRA_BPMP_MESSAGE_RESET to re-initialize the IPC channels first.
 *
 * Return: 0 on success, -ETIMEDOUT if no response arrived in time, or a
 * negative error from channel setup, doorbell or read.
 *
 * NOTE(review): on timeout the claimed threaded channel is not released
 * here — presumably a late response is still consumed via the busy bit in
 * tegra_bpmp_handle_rx(); confirm against the interrupt path.
 */
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	if (bpmp->suspended) {
		/* Reset BPMP IPC channels during resume based on flags passed */
		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
			tegra_bpmp_resume(bpmp->dev);
		else
			return -EAGAIN;
	}

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
432
433static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
434 unsigned int mrq)
435{
436 struct tegra_bpmp_mrq *entry;
437
438 list_for_each_entry(entry, &bpmp->mrqs, list)
439 if (entry->mrq == mrq)
440 return entry;
441
442 return NULL;
443}
444
/**
 * tegra_bpmp_mrq_return() - reply to an incoming MRQ request on @channel.
 * @channel: channel the request arrived on
 * @code: return code to place in the outbound "code" field
 * @data: optional reply payload
 * @size: payload size; at most MSG_DATA_MIN_SZ
 *
 * The request is acked first. A reply is only written when the sender set
 * MSG_ACK in the request flags, and the doorbell is only rung when the
 * sender set MSG_RING as well. Failures are flagged with WARN_ON and
 * swallowed — the function is void, so there is no error path back to the
 * caller.
 */
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	/* snapshot the request flags before acking releases the slot */
	unsigned long flags = tegra_bpmp_mb_read_field(&channel->ib, flags);
	struct tegra_bpmp *bpmp = channel->bpmp;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_bpmp_ack_request(channel);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
		return;

	tegra_bpmp_mb_write_field(&channel->ob, code, code);

	if (data && size > 0)
		tegra_bpmp_mb_write(&channel->ob, data, size);

	err = tegra_bpmp_post_response(channel);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = tegra_bpmp_ring_doorbell(bpmp);
		if (WARN_ON(err < 0))
			return;
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
481
/*
 * Dispatch an incoming request to the handler registered for @mrq.
 *
 * Registered handlers run with bpmp->lock held. Unknown MRQs are answered
 * with -EINVAL and a zero payload; in that case the lock is dropped before
 * replying.
 */
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}
502
503int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
504 tegra_bpmp_mrq_handler_t handler, void *data)
505{
506 struct tegra_bpmp_mrq *entry;
507 unsigned long flags;
508
509 if (!handler)
510 return -EINVAL;
511
512 entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
513 if (!entry)
514 return -ENOMEM;
515
516 spin_lock_irqsave(&bpmp->lock, flags);
517
518 entry->mrq = mrq;
519 entry->handler = handler;
520 entry->data = data;
521 list_add(&entry->list, &bpmp->mrqs);
522
523 spin_unlock_irqrestore(&bpmp->lock, flags);
524
525 return 0;
526}
527EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
528
529void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
530{
531 struct tegra_bpmp_mrq *entry;
532 unsigned long flags;
533
534 spin_lock_irqsave(&bpmp->lock, flags);
535
536 entry = tegra_bpmp_find_mrq(bpmp, mrq);
537 if (!entry)
538 goto unlock;
539
540 list_del(&entry->list);
541 devm_kfree(bpmp->dev, entry);
542
543unlock:
544 spin_unlock_irqrestore(&bpmp->lock, flags);
545}
546EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
547
548bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
549{
550 struct mrq_query_abi_request req = { .mrq = mrq };
551 struct mrq_query_abi_response resp;
552 struct tegra_bpmp_message msg = {
553 .mrq = MRQ_QUERY_ABI,
554 .tx = {
555 .data = &req,
556 .size = sizeof(req),
557 },
558 .rx = {
559 .data = &resp,
560 .size = sizeof(resp),
561 },
562 };
563 int err;
564
565 err = tegra_bpmp_transfer(bpmp, &msg);
566 if (err || msg.rx.ret)
567 return false;
568
569 return resp.status == 0;
570}
571EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
572
573static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
574 struct tegra_bpmp_channel *channel,
575 void *data)
576{
577 struct mrq_ping_request request;
578 struct mrq_ping_response response;
579
580 tegra_bpmp_mb_read(&request, &channel->ib, sizeof(request));
581
582 memset(&response, 0, sizeof(response));
583 response.reply = request.challenge << 1;
584
585 tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
586}
587
588static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
589{
590 struct mrq_ping_response response;
591 struct mrq_ping_request request;
592 struct tegra_bpmp_message msg;
593 unsigned long flags;
594 ktime_t start, end;
595 int err;
596
597 memset(&request, 0, sizeof(request));
598 request.challenge = 1;
599
600 memset(&response, 0, sizeof(response));
601
602 memset(&msg, 0, sizeof(msg));
603 msg.mrq = MRQ_PING;
604 msg.tx.data = &request;
605 msg.tx.size = sizeof(request);
606 msg.rx.data = &response;
607 msg.rx.size = sizeof(response);
608
609 local_irq_save(flags);
610 start = ktime_get();
611 err = tegra_bpmp_transfer_atomic(bpmp, &msg);
612 end = ktime_get();
613 local_irq_restore(flags);
614
615 if (!err)
616 dev_dbg(bpmp->dev,
617 "ping ok: challenge: %u, response: %u, time: %lld\n",
618 request.challenge, response.reply,
619 ktime_to_us(ktime_sub(end, start)));
620
621 return err;
622}
623
/*
 * Deprecated tag query (MRQ_QUERY_TAG): the firmware writes the tag into a
 * DMA buffer whose bus address is passed in the request, so @size must be
 * exactly TAG_SZ. Issued atomically with interrupts off, like
 * tegra_bpmp_ping().
 *
 * NOTE(review): the buffer is allocated with GFP_DMA32 — presumably the
 * request's addr field or the firmware is limited to 32-bit addresses;
 * confirm against the MRQ_QUERY_TAG ABI.
 */
static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
					   size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	if (size != TAG_SZ)
		return -EINVAL;

	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	/* only copy the tag out when the firmware actually wrote it */
	if (err == 0)
		memcpy(tag, virt, TAG_SZ);

	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);

	return err;
}
662
663static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
664 size_t size)
665{
666 if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
667 struct mrq_query_fw_tag_response resp;
668 struct tegra_bpmp_message msg = {
669 .mrq = MRQ_QUERY_FW_TAG,
670 .rx = {
671 .data = &resp,
672 .size = sizeof(resp),
673 },
674 };
675 int err;
676
677 if (size != sizeof(resp.tag))
678 return -EINVAL;
679
680 err = tegra_bpmp_transfer(bpmp, &msg);
681
682 if (err)
683 return err;
684 if (msg.rx.ret < 0)
685 return -EINVAL;
686
687 memcpy(tag, resp.tag, sizeof(resp.tag));
688 return 0;
689 }
690
691 return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
692}
693
694static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
695{
696 unsigned long flags = tegra_bpmp_mb_read_field(&channel->ob, flags);
697
698 if ((flags & MSG_RING) == 0)
699 return;
700
701 complete(&channel->completion);
702}
703
704void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
705{
706 struct tegra_bpmp_channel *channel;
707 unsigned int i, count;
708 unsigned long *busy;
709
710 channel = bpmp->rx_channel;
711 count = bpmp->soc->channels.thread.count;
712 busy = bpmp->threaded.busy;
713
714 if (tegra_bpmp_is_request_ready(channel)) {
715 unsigned int mrq = tegra_bpmp_mb_read_field(&channel->ib, code);
716
717 tegra_bpmp_handle_mrq(bpmp, mrq, channel);
718 }
719
720 spin_lock(&bpmp->lock);
721
722 for_each_set_bit(i, busy, count) {
723 struct tegra_bpmp_channel *channel;
724
725 channel = &bpmp->threaded_channels[i];
726
727 if (tegra_bpmp_is_response_ready(channel)) {
728 tegra_bpmp_channel_signal(channel);
729 clear_bit(i, busy);
730 }
731 }
732
733 spin_unlock(&bpmp->lock);
734}
735
/*
 * Probe: allocate the BPMP context and channel bookkeeping, initialize the
 * SoC-specific transport, verify the firmware with a ping, report its tag,
 * then populate child devices and the optional clock/reset/powergate and
 * debugfs sub-drivers advertised by the device tree node.
 */
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	char tag[TAG_SZ];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	/* one counting-semaphore slot per threaded channel */
	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated)
		return -ENOMEM;

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy)
		return -ENOMEM;

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel)
		return -ENOMEM;

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
	                                GFP_KERNEL);
	if (!bpmp->rx_channel)
		return -ENOMEM;

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels)
		return -ENOMEM;

	/* consumers locate the context via drvdata (see tegra_bpmp_get()) */
	platform_set_drvdata(pdev, bpmp);

	err = bpmp->soc->ops->init(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto deinit;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	if (of_property_present(pdev->dev.of_node, "#clock-cells")) {
		err = tegra_bpmp_init_clocks(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_property_present(pdev->dev.of_node, "#reset-cells")) {
		err = tegra_bpmp_init_resets(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_property_present(pdev->dev.of_node, "#power-domain-cells")) {
		err = tegra_bpmp_init_powergates(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	/* debugfs failure is logged but deliberately not fatal */
	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
deinit:
	if (bpmp->soc->ops->deinit)
		bpmp->soc->ops->deinit(bpmp);

	return err;
}
844
/*
 * suspend_noirq callback: flag the BPMP as suspended so subsequent
 * transfers fail with -EAGAIN (or trigger a channel reset via
 * TEGRA_BPMP_MESSAGE_RESET) until resume clears the flag.
 */
static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);

	bpmp->suspended = true;

	return 0;
}
853
854static int __maybe_unused tegra_bpmp_resume(struct device *dev)
855{
856 struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
857
858 bpmp->suspended = false;
859
860 if (bpmp->soc->ops->resume)
861 return bpmp->soc->ops->resume(bpmp);
862 else
863 return 0;
864}
865
/* noirq-phase suspend/resume; see tegra_bpmp_suspend()/_resume(). */
static const struct dev_pm_ops tegra_bpmp_pm_ops = {
	.suspend_noirq = tegra_bpmp_suspend,
	.resume_noirq = tegra_bpmp_resume,
};
870
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_264_SOC)
/*
 * Channel layout for Tegra186-era SoCs (shared by 194/234/264 via the
 * same "nvidia,tegra186-bpmp" compatible). Timeouts are in microseconds.
 */
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.ops = &tegra186_bpmp_ops,
	.num_resets = 193,
};
#endif
895
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
/*
 * Tegra210 channel layout: one channel each for TX, threaded and RX.
 * Timeouts are in microseconds.
 */
static const struct tegra_bpmp_soc tegra210_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 0,
			.count = 1,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 4,
			.count = 1,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 8,
			.count = 1,
			.timeout = 0,
		},
	},
	.ops = &tegra210_bpmp_ops,
};
#endif
918
/* Match table; entries exist only for the SoC support compiled in. */
static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_264_SOC)
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
	{ .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
#endif
	{ }
};
931
/*
 * Registered via builtin_platform_driver(): no module unload path, and
 * suppress_bind_attrs prevents manual unbind through sysfs.
 */
static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_bpmp_probe,
};
builtin_platform_driver(tegra_bpmp_driver);