Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * GPIO driver for virtio-based virtual GPIO controllers
4 *
5 * Copyright (C) 2021 metux IT consult
6 * Enrico Weigelt, metux IT consult <info@metux.net>
7 *
8 * Copyright (C) 2021 Linaro.
9 * Viresh Kumar <viresh.kumar@linaro.org>
10 */
11
12#include <linux/completion.h>
13#include <linux/dma-mapping.h>
14#include <linux/err.h>
15#include <linux/gpio/driver.h>
16#include <linux/io.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/spinlock.h>
21#include <linux/virtio_config.h>
22#include <uapi/linux/virtio_gpio.h>
23#include <uapi/linux/virtio_ids.h>
24
/*
 * Per-GPIO-line state for requests sent on the request virtqueue.
 * Each line owns a pre-allocated request/response buffer pair so that
 * concurrent lines never share DMA buffers.
 */
struct virtio_gpio_line {
	struct mutex lock; /* Protects line operation */
	struct completion completion;	/* signalled when the response buffer is returned by the device */

	unsigned int rxlen;	/* length of the last response; written by the requestq callback */

	__dma_from_device_group_begin();
	struct virtio_gpio_request req;		/* request buffer handed to the device */
	struct virtio_gpio_response res;	/* response buffer written by the device */
	__dma_from_device_group_end();
};
36
/* Per-GPIO-line interrupt state, protected by vgpio->eventq_lock. */
struct vgpio_irq_line {
	u8 type;		/* VIRTIO_GPIO_IRQ_TYPE_* trigger currently configured */
	bool disabled;		/* irq_disable() was called for this line */
	bool masked;		/* line is masked; incoming events are dropped */
	bool queued;		/* ireq/ires buffer pair is currently on the eventq */
	bool update_pending;	/* device must be told the new type in irq_bus_sync_unlock() */
	bool queue_pending;	/* buffer must be (re)queued once the update is sent */

	__dma_from_device_group_begin();
	struct virtio_gpio_irq_request ireq;	/* event request buffer handed to the device */
	struct virtio_gpio_irq_response ires;	/* event response buffer written by the device */
	__dma_from_device_group_end();
};
50
/* Driver instance state, stored in vdev->priv and as gpiochip data. */
struct virtio_gpio {
	struct virtio_device *vdev;
	struct mutex lock; /* Protects virtqueue operation */
	struct gpio_chip gc;
	struct virtio_gpio_line *lines;		/* one entry per GPIO line */
	struct virtqueue *request_vq;

	/* irq support; only populated when VIRTIO_GPIO_F_IRQ is negotiated */
	struct virtqueue *event_vq;
	struct mutex irq_lock; /* Protects irq operation */
	raw_spinlock_t eventq_lock; /* Protects queuing of the buffer */
	struct vgpio_irq_line *irq_lines;	/* one entry per GPIO line */
};
64
/*
 * _virtio_gpio_req() - Send one request on the requestq and wait for the
 * device's response.
 * @vgpio:	driver instance
 * @type:	VIRTIO_GPIO_MSG_* request type
 * @gpio:	GPIO line number; also selects the per-line buffer/lock
 * @txvalue:	value transmitted with the request
 * @rxvalue:	if non-NULL, receives the response's value byte
 * @response:	caller-supplied response buffer (must be DMA-capable)
 * @rxlen:	exact response length expected from the device
 *
 * Sleeps (mutexes + wait_for_completion); must not be called from
 * atomic context.  Returns 0 on success, a negative errno from
 * virtqueue_add_sgs(), or -EINVAL on a protocol error.
 */
static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
			    u8 txvalue, u8 *rxvalue, void *response, u32 rxlen)
{
	struct virtio_gpio_line *line = &vgpio->lines[gpio];
	struct virtio_gpio_request *req = &line->req;
	struct virtio_gpio_response *res = response;
	struct scatterlist *sgs[2], req_sg, res_sg;
	struct device *dev = &vgpio->vdev->dev;
	int ret;

	/*
	 * Prevent concurrent requests for the same line since we have
	 * pre-allocated request/response buffers for each GPIO line. Moreover
	 * Linux always accesses a GPIO line sequentially, so this locking shall
	 * always go through without any delays.
	 */
	mutex_lock(&line->lock);

	/* All wire fields are little-endian per the virtio-gpio spec */
	req->type = cpu_to_le16(type);
	req->gpio = cpu_to_le16(gpio);
	req->value = cpu_to_le32(txvalue);

	sg_init_one(&req_sg, req, sizeof(*req));
	sg_init_one(&res_sg, res, rxlen);
	sgs[0] = &req_sg;
	sgs[1] = &res_sg;

	/* Reset completion state before the buffer can possibly come back */
	line->rxlen = 0;
	reinit_completion(&line->completion);

	/*
	 * Virtqueue callers need to ensure they don't call its APIs with other
	 * virtqueue operations at the same time.
	 */
	mutex_lock(&vgpio->lock);
	ret = virtqueue_add_sgs(vgpio->request_vq, sgs, 1, 1, line, GFP_KERNEL);
	if (ret) {
		dev_err(dev, "failed to add request to vq\n");
		mutex_unlock(&vgpio->lock);
		goto out;
	}

	virtqueue_kick(vgpio->request_vq);
	mutex_unlock(&vgpio->lock);

	/* Completed by virtio_gpio_request_vq() when the device responds */
	wait_for_completion(&line->completion);

	if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
		dev_err(dev, "GPIO request failed: %d\n", gpio);
		ret = -EINVAL;
		goto out;
	}

	/* rxlen was recorded by the requestq callback */
	if (unlikely(line->rxlen != rxlen)) {
		dev_err(dev, "GPIO operation returned incorrect len (%u : %u)\n",
			rxlen, line->rxlen);
		ret = -EINVAL;
		goto out;
	}

	if (rxvalue)
		*rxvalue = res->value;

out:
	mutex_unlock(&line->lock);
	return ret;
}
132
133static int virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
134 u8 txvalue, u8 *rxvalue)
135{
136 struct virtio_gpio_line *line = &vgpio->lines[gpio];
137 struct virtio_gpio_response *res = &line->res;
138
139 return _virtio_gpio_req(vgpio, type, gpio, txvalue, rxvalue, res,
140 sizeof(*res));
141}
142
143static void virtio_gpio_free(struct gpio_chip *gc, unsigned int gpio)
144{
145 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
146
147 virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
148 VIRTIO_GPIO_DIRECTION_NONE, NULL);
149}
150
151static int virtio_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
152{
153 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
154 u8 direction;
155 int ret;
156
157 ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_DIRECTION, gpio, 0,
158 &direction);
159 if (ret)
160 return ret;
161
162 switch (direction) {
163 case VIRTIO_GPIO_DIRECTION_IN:
164 return GPIO_LINE_DIRECTION_IN;
165 case VIRTIO_GPIO_DIRECTION_OUT:
166 return GPIO_LINE_DIRECTION_OUT;
167 default:
168 return -EINVAL;
169 }
170}
171
172static int virtio_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
173{
174 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
175
176 return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
177 VIRTIO_GPIO_DIRECTION_IN, NULL);
178}
179
180static int virtio_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
181 int value)
182{
183 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
184 int ret;
185
186 ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
187 if (ret)
188 return ret;
189
190 return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_DIRECTION, gpio,
191 VIRTIO_GPIO_DIRECTION_OUT, NULL);
192}
193
194static int virtio_gpio_get(struct gpio_chip *gc, unsigned int gpio)
195{
196 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
197 u8 value;
198 int ret;
199
200 ret = virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_VALUE, gpio, 0, &value);
201 return ret ? ret : value;
202}
203
204static int virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
205{
206 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
207
208 return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value,
209 NULL);
210}
211
212/* Interrupt handling */
/* Interrupt handling */
/*
 * virtio_gpio_irq_prepare() - Queue the line's event buffer pair on the
 * eventq so the device can report the next interrupt.
 *
 * Every caller holds vgpio->eventq_lock (hence GFP_ATOMIC), and the line
 * must be enabled, unmasked and not already queued.
 */
static void virtio_gpio_irq_prepare(struct virtio_gpio *vgpio, u16 gpio)
{
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[gpio];
	struct virtio_gpio_irq_request *ireq = &irq_line->ireq;
	struct virtio_gpio_irq_response *ires = &irq_line->ires;
	struct scatterlist *sgs[2], req_sg, res_sg;
	int ret;

	/* Queuing twice, or while off/masked, would corrupt the protocol */
	if (WARN_ON(irq_line->queued || irq_line->masked || irq_line->disabled))
		return;

	ireq->gpio = cpu_to_le16(gpio);
	sg_init_one(&req_sg, ireq, sizeof(*ireq));
	sg_init_one(&res_sg, ires, sizeof(*ires));
	sgs[0] = &req_sg;
	sgs[1] = &res_sg;

	/* Atomic allocation: we are under a raw spinlock here */
	ret = virtqueue_add_sgs(vgpio->event_vq, sgs, 1, 1, irq_line, GFP_ATOMIC);
	if (ret) {
		dev_err(&vgpio->vdev->dev, "failed to add request to eventq\n");
		return;
	}

	/* Mark queued only after the add succeeded */
	irq_line->queued = true;
	virtqueue_kick(vgpio->event_vq);
}
239
/*
 * irqchip .irq_enable callback.
 *
 * Only updates the software state under eventq_lock; the actual device
 * update and buffer queuing are deferred to
 * virtio_gpio_irq_bus_sync_unlock(), which checks update_pending and
 * queue_pending.
 */
static void virtio_gpio_irq_enable(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

	raw_spin_lock(&vgpio->eventq_lock);
	irq_line->disabled = false;
	irq_line->masked = false;
	irq_line->queue_pending = true;
	raw_spin_unlock(&vgpio->eventq_lock);

	/* Ask bus_sync_unlock() to send VIRTIO_GPIO_MSG_IRQ_TYPE */
	irq_line->update_pending = true;
}
254
/*
 * irqchip .irq_disable callback.
 *
 * Mirror of virtio_gpio_irq_enable(): mark the line disabled/masked and
 * cancel any pending requeue; the device is told (with type NONE) later
 * from virtio_gpio_irq_bus_sync_unlock().
 */
static void virtio_gpio_irq_disable(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];

	raw_spin_lock(&vgpio->eventq_lock);
	irq_line->disabled = true;
	irq_line->masked = true;
	irq_line->queue_pending = false;
	raw_spin_unlock(&vgpio->eventq_lock);

	/* Ask bus_sync_unlock() to push the disable to the device */
	irq_line->update_pending = true;
}
269
270static void virtio_gpio_irq_mask(struct irq_data *d)
271{
272 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
273 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
274 struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
275
276 raw_spin_lock(&vgpio->eventq_lock);
277 irq_line->masked = true;
278 raw_spin_unlock(&vgpio->eventq_lock);
279}
280
281static void virtio_gpio_irq_unmask(struct irq_data *d)
282{
283 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
284 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
285 struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
286
287 raw_spin_lock(&vgpio->eventq_lock);
288 irq_line->masked = false;
289
290 /* Queue the buffer unconditionally on unmask */
291 virtio_gpio_irq_prepare(vgpio, d->hwirq);
292 raw_spin_unlock(&vgpio->eventq_lock);
293}
294
295static int virtio_gpio_irq_set_type(struct irq_data *d, unsigned int type)
296{
297 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
298 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
299 struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
300
301 switch (type) {
302 case IRQ_TYPE_EDGE_RISING:
303 type = VIRTIO_GPIO_IRQ_TYPE_EDGE_RISING;
304 break;
305 case IRQ_TYPE_EDGE_FALLING:
306 type = VIRTIO_GPIO_IRQ_TYPE_EDGE_FALLING;
307 break;
308 case IRQ_TYPE_EDGE_BOTH:
309 type = VIRTIO_GPIO_IRQ_TYPE_EDGE_BOTH;
310 break;
311 case IRQ_TYPE_LEVEL_LOW:
312 type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_LOW;
313 break;
314 case IRQ_TYPE_LEVEL_HIGH:
315 type = VIRTIO_GPIO_IRQ_TYPE_LEVEL_HIGH;
316 break;
317 default:
318 dev_err(&vgpio->vdev->dev, "unsupported irq type: %u\n", type);
319 return -EINVAL;
320 }
321
322 irq_line->type = type;
323 irq_line->update_pending = true;
324
325 return 0;
326}
327
328static void virtio_gpio_irq_bus_lock(struct irq_data *d)
329{
330 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
331 struct virtio_gpio *vgpio = gpiochip_get_data(gc);
332
333 mutex_lock(&vgpio->irq_lock);
334}
335
/*
 * irqchip .irq_bus_sync_unlock callback.
 *
 * Runs in sleepable context, so this is where the deferred
 * VIRTIO_GPIO_MSG_IRQ_TYPE request (scheduled via update_pending by
 * enable/disable/set_type) is actually sent, followed by requeuing the
 * event buffer if enable requested it.
 */
static void virtio_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct virtio_gpio *vgpio = gpiochip_get_data(gc);
	struct vgpio_irq_line *irq_line = &vgpio->irq_lines[d->hwirq];
	/* A disabled line is reported to the device as type NONE */
	u8 type = irq_line->disabled ? VIRTIO_GPIO_IRQ_TYPE_NONE : irq_line->type;
	unsigned long flags;

	if (irq_line->update_pending) {
		irq_line->update_pending = false;
		virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_IRQ_TYPE, d->hwirq, type,
				NULL);

		/* Queue the buffer only after interrupt is enabled */
		raw_spin_lock_irqsave(&vgpio->eventq_lock, flags);
		if (irq_line->queue_pending) {
			irq_line->queue_pending = false;
			virtio_gpio_irq_prepare(vgpio, d->hwirq);
		}
		raw_spin_unlock_irqrestore(&vgpio->eventq_lock, flags);
	}

	mutex_unlock(&vgpio->irq_lock);
}
360
/*
 * ignore_irq() - Decide whether a returned eventq buffer represents a
 * real interrupt or must be dropped.
 *
 * Called from the eventq callback for every returned buffer.  Clears
 * the line's queued flag and returns true when the event should NOT be
 * forwarded to the IRQ core (line masked/disabled, stale buffer, or
 * invalid status from the device).
 */
static bool ignore_irq(struct virtio_gpio *vgpio, int gpio,
		       struct vgpio_irq_line *irq_line)
{
	bool ignore = false;

	raw_spin_lock(&vgpio->eventq_lock);
	/* Buffer is back from the device, so it is no longer queued */
	irq_line->queued = false;

	/* Interrupt is disabled currently */
	if (irq_line->masked || irq_line->disabled) {
		ignore = true;
		goto unlock;
	}

	/*
	 * Buffer is returned as the interrupt was disabled earlier, but is
	 * enabled again now. Requeue the buffers.
	 */
	if (irq_line->ires.status == VIRTIO_GPIO_IRQ_STATUS_INVALID) {
		virtio_gpio_irq_prepare(vgpio, gpio);
		ignore = true;
		goto unlock;
	}

	/* Any status other than VALID at this point is a device bug */
	if (WARN_ON(irq_line->ires.status != VIRTIO_GPIO_IRQ_STATUS_VALID))
		ignore = true;

unlock:
	raw_spin_unlock(&vgpio->eventq_lock);

	return ignore;
}
393
/*
 * virtio_gpio_event_vq() - eventq callback.
 *
 * Drains all returned event buffers and forwards each valid one to the
 * IRQ core via generic_handle_domain_irq().
 */
static void virtio_gpio_event_vq(struct virtqueue *vq)
{
	struct virtio_gpio *vgpio = vq->vdev->priv;
	struct device *dev = &vgpio->vdev->dev;
	struct vgpio_irq_line *irq_line;
	int gpio, ret;
	unsigned int len;

	while (true) {
		irq_line = virtqueue_get_buf(vgpio->event_vq, &len);
		if (!irq_line)
			break;

		/* A short response from the device cannot be trusted */
		if (len != sizeof(irq_line->ires)) {
			dev_err(dev, "irq with incorrect length (%u : %u)\n",
				len, (unsigned int)sizeof(irq_line->ires));
			continue;
		}

		/*
		 * Find GPIO line number from the offset of irq_line within the
		 * irq_lines block. We can also get GPIO number from
		 * irq-request, but better not to rely on a buffer returned by
		 * remote.
		 */
		gpio = irq_line - vgpio->irq_lines;
		WARN_ON(gpio >= vgpio->gc.ngpio);

		if (unlikely(ignore_irq(vgpio, gpio, irq_line)))
			continue;

		ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio);
		if (ret)
			dev_err(dev, "failed to handle interrupt: %d\n", ret);
	}
}
430
431static void virtio_gpio_request_vq(struct virtqueue *vq)
432{
433 struct virtio_gpio_line *line;
434 unsigned int len;
435
436 do {
437 line = virtqueue_get_buf(vq, &len);
438 if (!line)
439 return;
440
441 line->rxlen = len;
442 complete(&line->completion);
443 } while (1);
444}
445
/* Reset the device before deleting its virtqueues */
static void virtio_gpio_free_vqs(struct virtio_device *vdev)
{
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}
451
/*
 * virtio_gpio_alloc_vqs() - Find the requestq (and, with IRQ support,
 * the eventq) and cache them in @vgpio.
 *
 * The eventq is only requested when vgpio->irq_lines was allocated,
 * i.e. when VIRTIO_GPIO_F_IRQ was negotiated.  Returns 0 on success or
 * a negative errno; partially-found vqs are freed again on failure.
 */
static int virtio_gpio_alloc_vqs(struct virtio_gpio *vgpio,
				 struct virtio_device *vdev)
{
	struct virtqueue_info vqs_info[] = {
		{ "requestq", virtio_gpio_request_vq },
		{ "eventq", virtio_gpio_event_vq },
	};
	struct virtqueue *vqs[2] = { NULL, NULL };
	int ret;

	ret = virtio_find_vqs(vdev, vgpio->irq_lines ? 2 : 1, vqs,
			      vqs_info, NULL);
	if (ret) {
		dev_err(&vdev->dev, "failed to find vqs: %d\n", ret);
		return ret;
	}

	if (!vqs[0]) {
		dev_err(&vdev->dev, "failed to find requestq vq\n");
		goto out;
	}
	vgpio->request_vq = vqs[0];

	/* The eventq is mandatory once IRQ support was negotiated */
	if (vgpio->irq_lines && !vqs[1]) {
		dev_err(&vdev->dev, "failed to find eventq vq\n");
		goto out;
	}
	vgpio->event_vq = vqs[1];

	return 0;

out:
	if (vqs[0] || vqs[1])
		virtio_gpio_free_vqs(vdev);

	return -ENODEV;
}
489
490static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
491 u32 gpio_names_size, u16 ngpio)
492{
493 struct virtio_gpio_response_get_names *res;
494 struct device *dev = &vgpio->vdev->dev;
495 u8 *gpio_names, *str;
496 const char **names;
497 int i, ret, len;
498
499 if (!gpio_names_size)
500 return NULL;
501
502 len = sizeof(*res) + gpio_names_size;
503 res = devm_kzalloc(dev, len, GFP_KERNEL);
504 if (!res)
505 return NULL;
506 gpio_names = res->value;
507
508 ret = _virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_GET_NAMES, 0, 0, NULL,
509 res, len);
510 if (ret) {
511 dev_err(dev, "Failed to get GPIO names: %d\n", ret);
512 return NULL;
513 }
514
515 names = devm_kcalloc(dev, ngpio, sizeof(*names), GFP_KERNEL);
516 if (!names)
517 return NULL;
518
519 /* NULL terminate the string instead of checking it */
520 gpio_names[gpio_names_size - 1] = '\0';
521
522 for (i = 0, str = gpio_names; i < ngpio; i++) {
523 names[i] = str;
524 str += strlen(str) + 1; /* zero-length strings are allowed */
525
526 if (str > gpio_names + gpio_names_size) {
527 dev_err(dev, "gpio_names block is too short (%d)\n", i);
528 return NULL;
529 }
530 }
531
532 return names;
533}
534
/*
 * virtio_gpio_probe() - Driver probe.
 *
 * Reads ngpio/gpio_names_size from virtio config space, sets up the
 * gpio_chip (and, when VIRTIO_GPIO_F_IRQ was negotiated, the irqchip),
 * allocates the virtqueues, marks the device ready and registers the
 * gpiochip.  All memory is devm-managed; only the vqs need explicit
 * cleanup on failure.
 */
static int virtio_gpio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct virtio_gpio *vgpio;
	struct irq_chip *gpio_irq_chip;
	u32 gpio_names_size;
	u16 ngpio;
	int ret, i;

	vgpio = devm_kzalloc(dev, sizeof(*vgpio), GFP_KERNEL);
	if (!vgpio)
		return -ENOMEM;

	/* Read configuration */
	gpio_names_size =
		virtio_cread32(vdev, offsetof(struct virtio_gpio_config,
					      gpio_names_size));
	ngpio = virtio_cread16(vdev, offsetof(struct virtio_gpio_config,
					      ngpio));
	if (!ngpio) {
		dev_err(dev, "Number of GPIOs can't be zero\n");
		return -EINVAL;
	}

	vgpio->lines = devm_kcalloc(dev, ngpio, sizeof(*vgpio->lines), GFP_KERNEL);
	if (!vgpio->lines)
		return -ENOMEM;

	for (i = 0; i < ngpio; i++) {
		mutex_init(&vgpio->lines[i].lock);
		init_completion(&vgpio->lines[i].completion);
	}

	mutex_init(&vgpio->lock);
	vdev->priv = vgpio;

	vgpio->vdev = vdev;
	vgpio->gc.free = virtio_gpio_free;
	vgpio->gc.get_direction = virtio_gpio_get_direction;
	vgpio->gc.direction_input = virtio_gpio_direction_input;
	vgpio->gc.direction_output = virtio_gpio_direction_output;
	vgpio->gc.get = virtio_gpio_get;
	vgpio->gc.set = virtio_gpio_set;
	vgpio->gc.ngpio = ngpio;
	vgpio->gc.base = -1; /* Allocate base dynamically */
	vgpio->gc.label = dev_name(dev);
	vgpio->gc.parent = dev;
	vgpio->gc.owner = THIS_MODULE;
	/* All callbacks wait on virtqueue responses, so they may sleep */
	vgpio->gc.can_sleep = true;

	/* Interrupt support */
	if (virtio_has_feature(vdev, VIRTIO_GPIO_F_IRQ)) {
		vgpio->irq_lines = devm_kcalloc(dev, ngpio, sizeof(*vgpio->irq_lines), GFP_KERNEL);
		if (!vgpio->irq_lines)
			return -ENOMEM;

		gpio_irq_chip = devm_kzalloc(dev, sizeof(*gpio_irq_chip), GFP_KERNEL);
		if (!gpio_irq_chip)
			return -ENOMEM;

		gpio_irq_chip->name = dev_name(dev);
		gpio_irq_chip->irq_enable = virtio_gpio_irq_enable;
		gpio_irq_chip->irq_disable = virtio_gpio_irq_disable;
		gpio_irq_chip->irq_mask = virtio_gpio_irq_mask;
		gpio_irq_chip->irq_unmask = virtio_gpio_irq_unmask;
		gpio_irq_chip->irq_set_type = virtio_gpio_irq_set_type;
		gpio_irq_chip->irq_bus_lock = virtio_gpio_irq_bus_lock;
		gpio_irq_chip->irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock;

		/* The event comes from the outside so no parent handler */
		vgpio->gc.irq.parent_handler = NULL;
		vgpio->gc.irq.num_parents = 0;
		vgpio->gc.irq.parents = NULL;
		vgpio->gc.irq.default_type = IRQ_TYPE_NONE;
		vgpio->gc.irq.handler = handle_level_irq;
		vgpio->gc.irq.chip = gpio_irq_chip;

		/* Lines start out disabled/masked until a handler is set up */
		for (i = 0; i < ngpio; i++) {
			vgpio->irq_lines[i].type = VIRTIO_GPIO_IRQ_TYPE_NONE;
			vgpio->irq_lines[i].disabled = true;
			vgpio->irq_lines[i].masked = true;
		}

		mutex_init(&vgpio->irq_lock);
		raw_spin_lock_init(&vgpio->eventq_lock);
	}

	ret = virtio_gpio_alloc_vqs(vgpio, vdev);
	if (ret)
		return ret;

	/* Mark the device ready to perform operations from within probe() */
	virtio_device_ready(vdev);

	/* Name lookup is best-effort; a NULL names array is acceptable */
	vgpio->gc.names = virtio_gpio_get_names(vgpio, gpio_names_size, ngpio);

	ret = gpiochip_add_data(&vgpio->gc, vgpio);
	if (ret) {
		virtio_gpio_free_vqs(vdev);
		dev_err(dev, "Failed to add virtio-gpio controller\n");
	}

	return ret;
}
639
/*
 * virtio_gpio_remove() - Driver remove: unregister the gpiochip first so
 * no new requests can arrive, then tear down the virtqueues.
 */
static void virtio_gpio_remove(struct virtio_device *vdev)
{
	struct virtio_gpio *vgpio = vdev->priv;

	gpiochip_remove(&vgpio->gc);
	virtio_gpio_free_vqs(vdev);
}
647
/* Match any virtio GPIO device, regardless of vendor */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_GPIO, VIRTIO_DEV_ANY_ID },
	{},
};
MODULE_DEVICE_TABLE(virtio, id_table);

/* Optional features this driver can negotiate with the device */
static const unsigned int features[] = {
	VIRTIO_GPIO_F_IRQ,
};

static struct virtio_driver virtio_gpio_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = virtio_gpio_probe,
	.remove = virtio_gpio_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
};
module_virtio_driver(virtio_gpio_driver);

MODULE_AUTHOR("Enrico Weigelt, metux IT consult <info@metux.net>");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("VirtIO GPIO driver");
MODULE_LICENSE("GPL");