Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
4 * Intel Management Engine Interface (Intel MEI) Linux driver
5 */
6
7#include <linux/cleanup.h>
8#include <linux/module.h>
9#include <linux/moduleparam.h>
10#include <linux/kernel.h>
11#include <linux/device.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/errno.h>
15#include <linux/types.h>
16#include <linux/fcntl.h>
17#include <linux/pm_runtime.h>
18#include <linux/poll.h>
19#include <linux/init.h>
20#include <linux/ioctl.h>
21#include <linux/cdev.h>
22#include <linux/sched/signal.h>
23#include <linux/compat.h>
24#include <linux/jiffies.h>
25#include <linux/interrupt.h>
26
27#include <linux/mei.h>
28
29#include "mei_dev.h"
30#include "client.h"
31
/* sysfs class under which the mei character devices are created */
static const struct class mei_class = {
	.name = "mei",
};

/* base dev_t of the char device region allocated in mei_init() */
static dev_t mei_devt;
/* one device per available minor number */
#define MEI_MAX_DEVS MINORMASK
/* protects mei_idr against concurrent minor allocation/release */
static DEFINE_MUTEX(mei_minor_lock);
/* maps allocated minor numbers to their mei_device instances */
static DEFINE_IDR(mei_idr);
40
/**
 * mei_open - the open function
 *
 * Looks up the mei device by the char device minor number, takes a
 * device reference for the lifetime of the open file, and binds a
 * freshly allocated host client to the file descriptor.
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	/* the minor was mapped to the device in mei_minor_get() */
	dev = idr_find(&mei_idr, iminor(inode));
	if (!dev)
		return -ENODEV;
	/* released in mei_release() or on the error path below */
	get_device(&dev->dev);

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		err = -ENODEV;
		goto err_unlock;
	}

	/* allocate a host client and link it into the device's file list */
	cl = mei_cl_alloc_linked(dev);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		goto err_unlock;
	}

	cl->fp = file;
	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	/* mei is a message stream: seeking is meaningless */
	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	put_device(&dev->dev);
	return err;
}
88
89/**
90 * mei_cl_vtag_remove_by_fp - remove vtag that corresponds to fp from list
91 *
92 * @cl: host client
93 * @fp: pointer to file structure
94 *
95 */
96static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
97 const struct file *fp)
98{
99 struct mei_cl_vtag *vtag_l, *next;
100
101 list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
102 if (vtag_l->fp == fp) {
103 list_del(&vtag_l->list);
104 kfree(vtag_l);
105 return;
106 }
107 }
108}
109
/**
 * mei_release - the release function
 *
 * Drops this file's vtag from the client's vtag map. Only the last
 * closer disconnects the client from the firmware and frees it; other
 * files sharing the client merely get their queues flushed.
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	mei_cl_vtag_remove_by_fp(cl, file);

	/* other file pointers still share this client: flush only ours */
	if (!list_empty(&cl->vtag_map)) {
		cl_dbg(dev, cl, "not the last vtag\n");
		mei_cl_flush_queues(cl, file);
		rets = 0;
		goto out;
	}

	rets = mei_cl_disconnect(cl);
	/*
	 * Check again: This is necessary since disconnect releases the lock
	 * and another client can connect in the meantime.
	 */
	if (!list_empty(&cl->vtag_map)) {
		cl_dbg(dev, cl, "not the last vtag after disconnect\n");
		mei_cl_flush_queues(cl, file);
		goto out;
	}

	/* last user: flush everything and tear the client down */
	mei_cl_flush_queues(cl, NULL);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);
	kfree(cl);

out:
	file->private_data = NULL;

	mutex_unlock(&dev->device_lock);
	/* balances the get_device() taken in mei_open() */
	put_device(&dev->dev);
	return rets;
}
164
165
/**
 * mei_read - the read function.
 *
 * Returns data from a previously completed read callback if one is
 * pending; otherwise starts a new read and, unless O_NONBLOCK is set,
 * sleeps until data arrives or the client disconnects. Partially
 * consumed callbacks are kept and @offset tracks the read position
 * within them.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	struct mei_cl_cb *cb = NULL;
	bool nonblock = !!(file->f_flags & O_NONBLOCK);
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (ubuf == NULL) {
		rets = -EMSGSIZE;
		goto out;
	}

	/* a completed read callback may already be waiting for us */
	cb = mei_cl_read_cb(cl, file);
	if (cb)
		goto copy_buffer;

	/* no pending data: a stale offset belongs to a consumed cb */
	if (*offset > 0)
		*offset = 0;

	/* -EBUSY means a read is already in flight; just wait for it */
	rets = mei_cl_read_start(cl, length, file);
	if (rets && rets != -EBUSY) {
		cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
		goto out;
	}

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* drop the lock while sleeping so the rx path can make progress */
	mutex_unlock(&dev->device_lock);
	if (wait_event_interruptible(cl->rx_wait,
				     mei_cl_read_cb(cl, file) ||
				     !mei_cl_is_connected(cl))) {
		if (signal_pending(current))
			return -EINTR;
		return -ERESTARTSYS;
	}
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy_buffer:
	/* now copy the data to user space */
	if (cb->status) {
		rets = cb->status;
		cl_dbg(dev, cl, "read operation failed %zd\n", rets);
		goto free;
	}

	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
	       cb->buf.size, cb->buf_idx, *offset);
	if (*offset >= cb->buf_idx) {
		rets = 0;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
		cl_dbg(dev, cl, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	/* not all data was read, keep the cb */
	if (*offset < cb->buf_idx)
		goto out;

free:
	mei_cl_del_rd_completed(cl, cb);
	*offset = 0;

out:
	cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
286
287/**
288 * mei_cl_vtag_by_fp - obtain the vtag by file pointer
289 *
290 * @cl: host client
291 * @fp: pointer to file structure
292 *
293 * Return: vtag value on success, otherwise 0
294 */
295static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
296{
297 struct mei_cl_vtag *cl_vtag;
298
299 if (!fp)
300 return 0;
301
302 list_for_each_entry(cl_vtag, &cl->vtag_map, list)
303 if (cl_vtag->fp == fp)
304 return cl_vtag->vtag;
305 return 0;
306}
307
/**
 * mei_write - the write function.
 *
 * Validates device and client state, waits (unless O_NONBLOCK) for
 * room in the per-client tx queue, then copies the user data into a
 * freshly allocated callback and submits it.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}

	/* the FW client may have been removed while we were connected */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	/* a single write cannot exceed the FW client's MTU */
	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	/* throttle: wait until the tx queue drains below the limit */
	while (cl->tx_cb_queued >= dev->tx_queue_limit) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		/* drop the lock while sleeping so tx completion can run */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	/* tag the message with the vtag registered for this file */
	cb->vtag = mei_cl_vtag_by_fp(cl, file);

	rets = copy_from_user(cb->buf.data, ubuf, length);
	if (rets) {
		cl_dbg(dev, cl, "failed to copy data from userland\n");
		rets = -EFAULT;
		mei_io_cb_free(cb);
		goto out;
	}

	/* mei_cl_write() consumes the cb on both success and failure */
	rets = mei_cl_write(cl, cb, MAX_SCHEDULE_TIMEOUT);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
400
/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * Resolves the requested UUID to an ME client, checks fixed-address
 * policy, fills the output properties and issues the connect. If the
 * connect collides with a link reset, waits for the reset to finish
 * and retries once the device is enabled again.
 *
 * @file: private data of the file object
 * @in_client_uuid: requested UUID for connection
 * @client: IOCTL connect data, output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
				    const uuid_le *in_client_uuid,
				    struct mei_client *client)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int rets;

	cl = file->private_data;
	dev = cl->dev;

	/* reject a second connect on an already-connected client */
	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED)
		return -EBUSY;

retry:
	/* find ME client we're trying to connect to */
	me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
	if (!me_cl) {
		cl_dbg(dev, cl, "Cannot connect to FW Client UUID = %pUl\n",
		       in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	if (me_cl->props.fixed_address) {
		/*
		 * fixed-address clients are reachable only when allowed by
		 * module policy or advertised as supported by the firmware
		 */
		bool forbidden = dev->override_fixed_address ?
				 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
		if (forbidden) {
			cl_dbg(dev, cl, "Connection forbidden to FW Client UUID = %pUl\n",
			       in_client_uuid);
			rets = -ENOTTY;
			goto end;
		}
	}

	cl_dbg(dev, cl, "Connect to FW Client ID = %d\n", me_cl->client_id);
	cl_dbg(dev, cl, "FW Client - Protocol Version = %d\n", me_cl->props.protocol_version);
	cl_dbg(dev, cl, "FW Client - Max Msg Len = %d\n", me_cl->props.max_msg_length);

	/* prepare the output buffer */
	client->max_msg_length = me_cl->props.max_msg_length;
	client->protocol_version = me_cl->props.protocol_version;
	cl_dbg(dev, cl, "Can connect?\n");

	rets = mei_cl_connect(cl, me_cl, file);

	if (rets && cl->status == -EFAULT &&
	    (dev->dev_state == MEI_DEV_RESETTING ||
	     dev->dev_state == MEI_DEV_INIT_CLIENTS)) {
		/* in link reset, wait for it completion */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible_timeout(dev->wait_dev_state,
				dev->dev_state == MEI_DEV_ENABLED,
				dev->timeouts.link_reset_wait);
		mutex_lock(&dev->device_lock);
		if (rets < 0) {
			if (signal_pending(current))
				rets = -EINTR;
			goto end;
		}
		if (dev->dev_state != MEI_DEV_ENABLED) {
			rets = -ETIME;
			goto end;
		}
		/* drop the stale reference before looking the client up again */
		mei_me_cl_put(me_cl);
		goto retry;
	}

end:
	mei_me_cl_put(me_cl);
	return rets;
}
486
487/**
488 * mei_vt_support_check - check if client support vtags
489 *
490 * @dev: mei_device
491 * @uuid: client UUID
492 *
493 * Locking: called under "dev->device_lock" lock
494 *
495 * Return:
496 * 0 - supported
497 * -ENOTTY - no such client
498 * -EOPNOTSUPP - vtags are not supported by client
499 */
500static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
501{
502 struct mei_me_client *me_cl;
503 int ret;
504
505 if (!dev->hbm_f_vt_supported)
506 return -EOPNOTSUPP;
507
508 me_cl = mei_me_cl_by_uuid(dev, uuid);
509 if (!me_cl) {
510 dev_dbg(&dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
511 uuid);
512 return -ENOTTY;
513 }
514 ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
515 mei_me_cl_put(me_cl);
516
517 return ret;
518}
519
/**
 * mei_ioctl_connect_vtag - connect to fw client with vtag IOCTL function
 *
 * Registers @vtag for this file on a host client. A fresh client may
 * be swapped for an existing one connected to the same FW client so
 * several files can multiplex over one connection, each with its own
 * vtag. Falls back to a plain connect if the client ends up
 * unconnected after the state settles.
 *
 * @file: private data of the file object
 * @in_client_uuid: requested UUID for connection
 * @client: IOCTL connect data, output parameters
 * @vtag: vm tag
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_vtag(struct file *file,
				  const uuid_le *in_client_uuid,
				  struct mei_client *client,
				  u8 vtag)
{
	struct mei_device *dev;
	struct mei_cl *cl;
	struct mei_cl *pos;
	struct mei_cl_vtag *cl_vtag;

	cl = file->private_data;
	dev = cl->dev;

	cl_dbg(dev, cl, "FW Client %pUl vtag %d\n", in_client_uuid, vtag);

	switch (cl->state) {
	case MEI_FILE_DISCONNECTED:
		/* a reconnect must reuse the vtag this file registered */
		if (mei_cl_vtag_by_fp(cl, file) != vtag) {
			cl_err(dev, cl, "reconnect with different vtag\n");
			return -EINVAL;
		}
		break;
	case MEI_FILE_INITIALIZING:
		/* malicious connect from another thread may push vtag */
		if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) {
			cl_err(dev, cl, "vtag already filled\n");
			return -EINVAL;
		}

		/* look for an existing client connected to the same FW client */
		list_for_each_entry(pos, &dev->file_list, link) {
			if (pos == cl)
				continue;
			if (!pos->me_cl)
				continue;

			/* only search for same UUID */
			if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid))
				continue;

			/* if tag already exist try another fp */
			if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag)))
				continue;

			/* replace cl with acquired one */
			cl_dbg(dev, cl, "replacing with existing cl\n");
			mei_cl_unlink(cl);
			kfree(cl);
			file->private_data = pos;
			cl = pos;
			break;
		}

		cl_vtag = mei_cl_vtag_alloc(file, vtag);
		if (IS_ERR(cl_vtag))
			return -ENOMEM;

		list_add_tail(&cl_vtag->list, &cl->vtag_map);
		break;
	default:
		return -EBUSY;
	}

	/* wait out transient states (e.g. connect/disconnect in flight) */
	while (cl->state != MEI_FILE_INITIALIZING &&
	       cl->state != MEI_FILE_DISCONNECTED &&
	       cl->state != MEI_FILE_CONNECTED) {
		mutex_unlock(&dev->device_lock);
		wait_event_timeout(cl->wait,
				   (cl->state == MEI_FILE_CONNECTED ||
				    cl->state == MEI_FILE_DISCONNECTED ||
				    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
				    cl->state == MEI_FILE_DISCONNECT_REPLY),
				   dev->timeouts.cl_connect);
		mutex_lock(&dev->device_lock);
	}

	if (!mei_cl_is_connected(cl))
		return mei_ioctl_connect_client(file, in_client_uuid, client);

	/* already connected: just report the FW client properties */
	client->max_msg_length = cl->me_cl->props.max_msg_length;
	client->protocol_version = cl->me_cl->props.protocol_version;

	return 0;
}
615
616/**
617 * mei_ioctl_client_notify_request - propagate event notification
618 * request to client
619 *
620 * @file: pointer to file structure
621 * @request: 0 - disable, 1 - enable
622 *
623 * Return: 0 on success , <0 on error
624 */
625static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
626{
627 struct mei_cl *cl = file->private_data;
628
629 if (request != MEI_HBM_NOTIFICATION_START &&
630 request != MEI_HBM_NOTIFICATION_STOP)
631 return -EINVAL;
632
633 return mei_cl_notify_request(cl, file, (u8)request);
634}
635
636/**
637 * mei_ioctl_client_notify_get - wait for notification request
638 *
639 * @file: pointer to file structure
640 * @notify_get: 0 - disable, 1 - enable
641 *
642 * Return: 0 on success , <0 on error
643 */
644static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
645{
646 struct mei_cl *cl = file->private_data;
647 bool notify_ev;
648 bool block = (file->f_flags & O_NONBLOCK) == 0;
649 int rets;
650
651 rets = mei_cl_notify_get(cl, block, ¬ify_ev);
652 if (rets)
653 return rets;
654
655 *notify_get = notify_ev ? 1 : 0;
656 return 0;
657}
658
/**
 * mei_ioctl - the IOCTL function
 *
 * Dispatches the mei ioctls: connect (with or without vtag) and event
 * notification set/get. All commands run under the device lock and
 * copy their argument structures from/to user space.
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success , <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data conn;
	struct mei_connect_client_data_vtag conn_vtag;
	uuid_le cl_uuid;
	struct mei_client *props;
	u8 vtag;
	u32 notify_get, notify_req;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		cl_dbg(dev, cl, "IOCTL_MEI_CONNECT_CLIENT\n");
		if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) {
			cl_dbg(dev, cl, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		cl_uuid = conn.in_client_uuid;
		props = &conn.out_client_properties;
		vtag = 0;

		/*
		 * even a plain connect goes through the vtag path (with
		 * vtag 0) when the client supports vtags, so it can share
		 * an existing connection
		 */
		rets = mei_vt_support_check(dev, &cl_uuid);
		if (rets == -ENOTTY)
			goto out;
		if (!rets)
			rets = mei_ioctl_connect_vtag(file, &cl_uuid, props,
						      vtag);
		else
			rets = mei_ioctl_connect_client(file, &cl_uuid, props);
		if (rets)
			goto out;

		/* if all is ok, copying the data back to user. */
		if (copy_to_user((char __user *)data, &conn, sizeof(conn))) {
			cl_dbg(dev, cl, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_CONNECT_CLIENT_VTAG:
		cl_dbg(dev, cl, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n");
		if (copy_from_user(&conn_vtag, (char __user *)data,
				   sizeof(conn_vtag))) {
			cl_dbg(dev, cl, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		cl_uuid = conn_vtag.connect.in_client_uuid;
		props = &conn_vtag.out_client_properties;
		vtag = conn_vtag.connect.vtag;

		rets = mei_vt_support_check(dev, &cl_uuid);
		if (rets == -EOPNOTSUPP)
			cl_dbg(dev, cl, "FW Client %pUl does not support vtags\n",
			       &cl_uuid);
		if (rets)
			goto out;

		/* vtag 0 is reserved for the non-vtag connect path */
		if (!vtag) {
			cl_dbg(dev, cl, "vtag can't be zero\n");
			rets = -EINVAL;
			goto out;
		}

		rets = mei_ioctl_connect_vtag(file, &cl_uuid, props, vtag);
		if (rets)
			goto out;

		/* if all is ok, copying the data back to user. */
		if (copy_to_user((char __user *)data, &conn_vtag,
				 sizeof(conn_vtag))) {
			cl_dbg(dev, cl, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_NOTIFY_SET:
		cl_dbg(dev, cl, "IOCTL_MEI_NOTIFY_SET\n");
		if (copy_from_user(&notify_req,
				   (char __user *)data, sizeof(notify_req))) {
			cl_dbg(dev, cl, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		rets = mei_ioctl_client_notify_request(file, notify_req);
		break;

	case IOCTL_MEI_NOTIFY_GET:
		cl_dbg(dev, cl, "IOCTL_MEI_NOTIFY_GET\n");
		rets = mei_ioctl_client_notify_get(file, &notify_get);
		if (rets)
			goto out;

		cl_dbg(dev, cl, "copy connect data to user\n");
		if (copy_to_user((char __user *)data,
				 &notify_get, sizeof(notify_get))) {
			cl_dbg(dev, cl, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;

		}
		break;

	default:
		rets = -ENOIOCTLCMD;
	}

out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
801
/**
 * mei_poll - the poll function
 *
 * Reports readiness for read (completed rx callback pending), write
 * (tx queue below limit) and notification events (EPOLLPRI), arming
 * the corresponding wait queues. A read is kicked off opportunistically
 * when no data is pending yet.
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static __poll_t mei_poll(struct file *file, poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	__poll_t mask = 0;
	bool notify_en;

	if (WARN_ON(!cl || !cl->dev))
		return EPOLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	/* EPOLLPRI is meaningful only when notifications are enabled */
	notify_en = cl->notify_en && (req_events & EPOLLPRI);

	if (dev->dev_state != MEI_DEV_ENABLED ||
	    !mei_cl_is_connected(cl)) {
		mask = EPOLLERR;
		goto out;
	}

	if (notify_en) {
		poll_wait(file, &cl->ev_wait, wait);
		if (cl->notify_ev)
			mask |= EPOLLPRI;
	}

	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		poll_wait(file, &cl->rx_wait, wait);

		if (mei_cl_read_cb(cl, file))
			mask |= EPOLLIN | EPOLLRDNORM;
		else
			/* no data yet: start a read so rx_wait will fire */
			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
	}

	if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
		poll_wait(file, &cl->tx_wait, wait);
		if (cl->tx_cb_queued < dev->tx_queue_limit)
			mask |= EPOLLOUT | EPOLLWRNORM;
	}

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
858
859/**
860 * mei_cl_is_write_queued - check if the client has pending writes.
861 *
862 * @cl: writing host client
863 *
864 * Return: true if client is writing, false otherwise.
865 */
866static bool mei_cl_is_write_queued(struct mei_cl *cl)
867{
868 struct mei_device *dev = cl->dev;
869 struct mei_cl_cb *cb;
870
871 list_for_each_entry(cb, &dev->write_list, list)
872 if (cb->cl == cl)
873 return true;
874 list_for_each_entry(cb, &dev->write_waiting_list, list)
875 if (cb->cl == cl)
876 return true;
877 return false;
878}
879
/**
 * mei_fsync - the fsync handler
 *
 * Blocks until all of the client's queued writes have completed or
 * the client disconnects.
 *
 * @fp: pointer to file structure
 * @start: unused
 * @end: unused
 * @datasync: unused
 *
 * Return: 0 on success, -ENODEV if client is not connected
 */
static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
{
	struct mei_cl *cl = fp->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* re-check after every wakeup: completion may cover only one cb */
	while (mei_cl_is_write_queued(cl)) {
		/* drop the lock while sleeping so tx completion can run */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				!mei_cl_is_connected(cl));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}
	rets = 0;
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
929
930/**
931 * mei_fasync - asynchronous io support
932 *
933 * @fd: file descriptor
934 * @file: pointer to file structure
935 * @band: band bitmap
936 *
937 * Return: negative on error,
938 * 0 if it did no changes,
939 * and positive a process was added or deleted
940 */
941static int mei_fasync(int fd, struct file *file, int band)
942{
943
944 struct mei_cl *cl = file->private_data;
945
946 if (!mei_cl_is_connected(cl))
947 return -ENODEV;
948
949 return fasync_helper(fd, file, band, &cl->ev_async);
950}
951
952/**
953 * trc_show - mei device trc attribute show method
954 *
955 * @device: device pointer
956 * @attr: attribute pointer
957 * @buf: char out buffer
958 *
959 * Return: number of the bytes printed into buf or error
960 */
961static ssize_t trc_show(struct device *device,
962 struct device_attribute *attr, char *buf)
963{
964 struct mei_device *dev = dev_get_drvdata(device);
965 u32 trc;
966 int ret;
967
968 ret = mei_trc_status(dev, &trc);
969 if (ret)
970 return ret;
971 return sprintf(buf, "%08X\n", trc);
972}
973static DEVICE_ATTR_RO(trc);
974
/**
 * fw_status_show - mei device fw_status attribute show method
 *
 * Reads the raw firmware status registers and prints one 32-bit word
 * per line. Some platforms require the parent device to be resumed
 * before the registers can be read.
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_status_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mei_device *mdev = dev_get_drvdata(device);
	struct mei_fw_status fw_status;
	int err, i;
	ssize_t cnt = 0;

	/* resume the parent first if register access needs it */
	if (mdev->read_fws_need_resume) {
		err = pm_runtime_resume_and_get(mdev->parent);
		if (err) {
			dev_err(device, "read fw_status resume error = %d\n", err);
			return err;
		}
	}
	/* read the registers under the device lock only */
	scoped_guard(mutex, &mdev->device_lock)
		err = mei_fw_status(mdev, &fw_status);
	if (mdev->read_fws_need_resume)
		pm_runtime_put_autosuspend(mdev->parent);
	if (err) {
		dev_err(device, "read fw_status error = %d\n", err);
		return err;
	}

	for (i = 0; i < fw_status.count; i++)
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
				 fw_status.status[i]);
	return cnt;
}
static DEVICE_ATTR_RO(fw_status);
1014
1015/**
1016 * hbm_ver_show - display HBM protocol version negotiated with FW
1017 *
1018 * @device: device pointer
1019 * @attr: attribute pointer
1020 * @buf: char out buffer
1021 *
1022 * Return: number of the bytes printed into buf or error
1023 */
1024static ssize_t hbm_ver_show(struct device *device,
1025 struct device_attribute *attr, char *buf)
1026{
1027 struct mei_device *dev = dev_get_drvdata(device);
1028 struct hbm_version ver;
1029
1030 mutex_lock(&dev->device_lock);
1031 ver = dev->version;
1032 mutex_unlock(&dev->device_lock);
1033
1034 return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
1035}
1036static DEVICE_ATTR_RO(hbm_ver);
1037
1038/**
1039 * hbm_ver_drv_show - display HBM protocol version advertised by driver
1040 *
1041 * @device: device pointer
1042 * @attr: attribute pointer
1043 * @buf: char out buffer
1044 *
1045 * Return: number of the bytes printed into buf or error
1046 */
1047static ssize_t hbm_ver_drv_show(struct device *device,
1048 struct device_attribute *attr, char *buf)
1049{
1050 return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
1051}
1052static DEVICE_ATTR_RO(hbm_ver_drv);
1053
1054static ssize_t tx_queue_limit_show(struct device *device,
1055 struct device_attribute *attr, char *buf)
1056{
1057 struct mei_device *dev = dev_get_drvdata(device);
1058 u8 size = 0;
1059
1060 mutex_lock(&dev->device_lock);
1061 size = dev->tx_queue_limit;
1062 mutex_unlock(&dev->device_lock);
1063
1064 return sysfs_emit(buf, "%u\n", size);
1065}
1066
1067static ssize_t tx_queue_limit_store(struct device *device,
1068 struct device_attribute *attr,
1069 const char *buf, size_t count)
1070{
1071 struct mei_device *dev = dev_get_drvdata(device);
1072 u8 limit;
1073 unsigned int inp;
1074 int err;
1075
1076 err = kstrtouint(buf, 10, &inp);
1077 if (err)
1078 return err;
1079 if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
1080 return -EINVAL;
1081 limit = inp;
1082
1083 mutex_lock(&dev->device_lock);
1084 dev->tx_queue_limit = limit;
1085 mutex_unlock(&dev->device_lock);
1086
1087 return count;
1088}
1089static DEVICE_ATTR_RW(tx_queue_limit);
1090
1091/**
1092 * fw_ver_show - display ME FW version
1093 *
1094 * @device: device pointer
1095 * @attr: attribute pointer
1096 * @buf: char out buffer
1097 *
1098 * Return: number of the bytes printed into buf or error
1099 */
1100static ssize_t fw_ver_show(struct device *device,
1101 struct device_attribute *attr, char *buf)
1102{
1103 struct mei_device *dev = dev_get_drvdata(device);
1104 struct mei_fw_version *ver;
1105 ssize_t cnt = 0;
1106 int i;
1107
1108 ver = dev->fw_ver;
1109
1110 for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
1111 cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
1112 ver[i].platform, ver[i].major, ver[i].minor,
1113 ver[i].hotfix, ver[i].buildno);
1114 return cnt;
1115}
1116static DEVICE_ATTR_RO(fw_ver);
1117
1118/**
1119 * dev_state_show - display device state
1120 *
1121 * @device: device pointer
1122 * @attr: attribute pointer
1123 * @buf: char out buffer
1124 *
1125 * Return: number of the bytes printed into buf or error
1126 */
1127static ssize_t dev_state_show(struct device *device,
1128 struct device_attribute *attr, char *buf)
1129{
1130 struct mei_device *dev = dev_get_drvdata(device);
1131 enum mei_dev_state dev_state;
1132
1133 mutex_lock(&dev->device_lock);
1134 dev_state = dev->dev_state;
1135 mutex_unlock(&dev->device_lock);
1136
1137 return sprintf(buf, "%s", mei_dev_state_str(dev_state));
1138}
1139static DEVICE_ATTR_RO(dev_state);
1140
/**
 * mei_set_devstate: set to new device state and notify sysfs file.
 *
 * Wakes any waiters on wait_dev_state (e.g. the connect retry path)
 * and pokes the "dev_state" sysfs attribute so userspace pollers see
 * the change.
 *
 * @dev: mei_device
 * @state: new device state
 */
void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
{
	struct device *clsdev;

	if (dev->dev_state == state)
		return;

	dev->dev_state = state;

	wake_up_interruptible_all(&dev->wait_dev_state);

	/* char device may not be registered yet during early init */
	if (!dev->cdev)
		return;

	clsdev = class_find_device_by_devt(&mei_class, dev->cdev->dev);
	if (clsdev) {
		sysfs_notify(&clsdev->kobj, NULL, "dev_state");
		/* drop the reference taken by class_find_device_by_devt() */
		put_device(clsdev);
	}
}
1167
1168/**
1169 * kind_show - display device kind
1170 *
1171 * @device: device pointer
1172 * @attr: attribute pointer
1173 * @buf: char out buffer
1174 *
1175 * Return: number of the bytes printed into buf or error
1176 */
1177static ssize_t kind_show(struct device *device,
1178 struct device_attribute *attr, char *buf)
1179{
1180 struct mei_device *dev = dev_get_drvdata(device);
1181 ssize_t ret;
1182
1183 if (dev->kind)
1184 ret = sprintf(buf, "%s\n", dev->kind);
1185 else
1186 ret = sprintf(buf, "%s\n", "mei");
1187
1188 return ret;
1189}
1190static DEVICE_ATTR_RO(kind);
1191
/* sysfs attributes exposed on every mei class device */
static struct attribute *mei_attrs[] = {
	&dev_attr_fw_status.attr,
	&dev_attr_hbm_ver.attr,
	&dev_attr_hbm_ver_drv.attr,
	&dev_attr_tx_queue_limit.attr,
	&dev_attr_fw_ver.attr,
	&dev_attr_dev_state.attr,
	&dev_attr_trc.attr,
	&dev_attr_kind.attr,
	NULL
};
/* generates mei_groups, assigned to dev.groups in mei_register() */
ATTRIBUTE_GROUPS(mei);
1204
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
	/* ioctl structs are compat-clean; only the pointer needs conversion */
	.compat_ioctl = compat_ptr_ioctl,
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.fsync = mei_fsync,
	.fasync = mei_fasync,
};
1220
/**
 * mei_minor_get - obtain next free device minor number
 *
 * Allocates a minor from mei_idr and records it in @dev->minor;
 * the mapping is removed again by mei_minor_free().
 *
 * @dev: device pointer
 *
 * Return: allocated minor, or -ENOSPC if no free minor left
 */
static int mei_minor_get(struct mei_device *dev)
{
	int ret;

	mutex_lock(&mei_minor_lock);
	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
	if (ret >= 0)
		dev->minor = ret;
	else if (ret == -ENOSPC)
		dev_err(&dev->dev, "too many mei devices\n");

	mutex_unlock(&mei_minor_lock);
	return ret;
}
1242
1243/**
1244 * mei_minor_free - mark device minor number as free
1245 *
1246 * @minor: minor number to free
1247 */
1248static void mei_minor_free(int minor)
1249{
1250 mutex_lock(&mei_minor_lock);
1251 idr_remove(&mei_idr, minor);
1252 mutex_unlock(&mei_minor_lock);
1253}
1254
/* struct device release callback: frees the embedding mei_device
 * (stored as drvdata in mei_register()) once the last reference drops
 */
static void mei_device_release(struct device *dev)
{
	kfree(dev_get_drvdata(dev));
}
1259
/**
 * mei_register - register the mei char device and sysfs entries
 *
 * Allocates a minor, initializes the embedded struct device, attaches
 * a cdev and adds the device to the system, including its debugfs
 * entries.
 *
 * @dev: mei device
 * @parent: parent (bus) device
 *
 * Return: 0 on success, <0 on error
 */
int mei_register(struct mei_device *dev, struct device *parent)
{
	int ret, devno;
	int minor;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	minor = dev->minor;

	/* Fill in the data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);

	device_initialize(&dev->dev);
	dev->dev.devt = devno;
	dev->dev.class = &mei_class;
	dev->dev.parent = parent;
	dev->dev.groups = mei_groups;
	/* mei_device_release() frees dev via this drvdata pointer */
	dev->dev.release = mei_device_release;
	dev_set_drvdata(&dev->dev, dev);

	dev->cdev = cdev_alloc();
	if (!dev->cdev) {
		ret = -ENOMEM;
		goto err;
	}
	dev->cdev->ops = &mei_fops;
	dev->cdev->owner = parent->driver->owner;
	/* keep dev alive as long as the cdev is in use */
	cdev_set_parent(dev->cdev, &dev->dev.kobj);

	/* Add the device */
	ret = cdev_add(dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add cdev for device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_del_cdev;
	}

	ret = dev_set_name(&dev->dev, "mei%d", dev->minor);
	if (ret) {
		dev_err(parent, "unable to set name to device %d:%d ret = %d\n",
			MAJOR(mei_devt), dev->minor, ret);
		goto err_del_cdev;
	}

	ret = device_add(&dev->dev);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d ret = %d\n",
			MAJOR(mei_devt), dev->minor, ret);
		goto err_del_cdev;
	}

	mei_dbgfs_register(dev, dev_name(&dev->dev));

	return 0;

err_del_cdev:
	cdev_del(dev->cdev);
err:
	/* put_device() triggers mei_device_release() on the last ref */
	put_device(&dev->dev);
	mei_minor_free(minor);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);
1325
/**
 * mei_deregister - unregister the mei char device
 *
 * Tears down what mei_register() set up: the cdev, debugfs entries,
 * the class device and finally the minor number.
 *
 * @dev: mei device
 */
void mei_deregister(struct mei_device *dev)
{
	int devno;
	/* dev may be freed by device_destroy(); cache the minor first */
	int minor = dev->minor;

	devno = dev->cdev->dev;
	cdev_del(dev->cdev);

	mei_dbgfs_deregister(dev);

	device_destroy(&mei_class, devno);

	mei_minor_free(minor);
}
EXPORT_SYMBOL_GPL(mei_deregister);
1341
/* module init: register the mei class, reserve the char device
 * region and bring up the mei client bus; unwinds in reverse on error
 */
static int __init mei_init(void)
{
	int ret;

	ret = class_register(&mei_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_unregister(&mei_class);
	return ret;
}
1370
/* module exit: release the char device region, the class and the bus */
static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_unregister(&mei_class);
	mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
1384