Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

vfs: do bulk POLL* -> EPOLL* replacement

This is the mindless scripted replacement of kernel use of POLL*
variables as described by Al, done by this script:

for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
done

with de-mangling cleanups yet to come.

NOTE! On almost all architectures, the EPOLL* constants have the same
values as the POLL* constants do. But the keyword here is "almost".
For various bad reasons they aren't the same, and epoll() doesn't
actually work quite correctly in some cases due to this on Sparc et al.

The next patch from Al will sort out the final differences, and we
should be all done.

Scripted-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+913 -913
+1 -1
arch/cris/arch-v10/drivers/gpio.c
··· 173 173 174 174 if ((data & priv->highalarm) || 175 175 (~data & priv->lowalarm)) { 176 - mask = POLLIN|POLLRDNORM; 176 + mask = EPOLLIN|EPOLLRDNORM; 177 177 } 178 178 179 179 out:
+4 -4
arch/cris/arch-v10/drivers/sync_serial.c
··· 666 666 poll_wait(file, &port->in_wait_q, wait); 667 667 /* Some room to write */ 668 668 if (port->out_count < OUT_BUFFER_SIZE) 669 - mask |= POLLOUT | POLLWRNORM; 669 + mask |= EPOLLOUT | EPOLLWRNORM; 670 670 /* At least an inbufchunk of data */ 671 671 if (sync_data_avail(port) >= port->inbufchunk) 672 - mask |= POLLIN | POLLRDNORM; 672 + mask |= EPOLLIN | EPOLLRDNORM; 673 673 674 674 DEBUGPOLL(if (mask != prev_mask) 675 675 printk(KERN_DEBUG "sync_serial_poll: mask 0x%08X %s %s\n", 676 676 mask, 677 - mask & POLLOUT ? "POLLOUT" : "", 678 - mask & POLLIN ? "POLLIN" : ""); 677 + mask & EPOLLOUT ? "POLLOUT" : "", 678 + mask & EPOLLIN ? "POLLIN" : ""); 679 679 prev_mask = mask; 680 680 ); 681 681 return mask;
+5 -5
arch/cris/arch-v32/drivers/sync_serial.c
··· 574 574 575 575 /* No active transfer, descriptors are available */ 576 576 if (port->output && !port->tr_running) 577 - mask |= POLLOUT | POLLWRNORM; 577 + mask |= EPOLLOUT | EPOLLWRNORM; 578 578 579 579 /* Descriptor and buffer space available. */ 580 580 if (port->output && 581 581 port->active_tr_descr != port->catch_tr_descr && 582 582 port->out_buf_count < OUT_BUFFER_SIZE) 583 - mask |= POLLOUT | POLLWRNORM; 583 + mask |= EPOLLOUT | EPOLLWRNORM; 584 584 585 585 /* At least an inbufchunk of data */ 586 586 if (port->input && sync_data_avail(port) >= port->inbufchunk) 587 - mask |= POLLIN | POLLRDNORM; 587 + mask |= EPOLLIN | EPOLLRDNORM; 588 588 589 589 DEBUGPOLL( 590 590 if (mask != prev_mask) 591 591 pr_info("sync_serial_poll: mask 0x%08X %s %s\n", 592 592 mask, 593 - mask & POLLOUT ? "POLLOUT" : "", 594 - mask & POLLIN ? "POLLIN" : ""); 593 + mask & EPOLLOUT ? "POLLOUT" : "", 594 + mask & EPOLLIN ? "POLLIN" : ""); 595 595 prev_mask = mask; 596 596 ); 597 597 return mask;
+1 -1
arch/ia64/kernel/perfmon.c
··· 1670 1670 PROTECT_CTX(ctx, flags); 1671 1671 1672 1672 if (PFM_CTXQ_EMPTY(ctx) == 0) 1673 - mask = POLLIN | POLLRDNORM; 1673 + mask = EPOLLIN | EPOLLRDNORM; 1674 1674 1675 1675 UNPROTECT_CTX(ctx, flags); 1676 1676
+2 -2
arch/mips/kernel/rtlx.c
··· 349 349 350 350 /* data available to read? */ 351 351 if (rtlx_read_poll(minor, 0)) 352 - mask |= POLLIN | POLLRDNORM; 352 + mask |= EPOLLIN | EPOLLRDNORM; 353 353 354 354 /* space to write */ 355 355 if (rtlx_write_poll(minor)) 356 - mask |= POLLOUT | POLLWRNORM; 356 + mask |= EPOLLOUT | EPOLLWRNORM; 357 357 358 358 return mask; 359 359 }
+1 -1
arch/powerpc/kernel/rtasd.c
··· 392 392 { 393 393 poll_wait(file, &rtas_log_wait, wait); 394 394 if (rtas_log_size) 395 - return POLLIN | POLLRDNORM; 395 + return EPOLLIN | EPOLLRDNORM; 396 396 return 0; 397 397 } 398 398
+4 -4
arch/powerpc/platforms/cell/spufs/backing_ops.c
··· 101 101 but first mark any pending interrupts as done so 102 102 we don't get woken up unnecessarily */ 103 103 104 - if (events & (POLLIN | POLLRDNORM)) { 104 + if (events & (EPOLLIN | EPOLLRDNORM)) { 105 105 if (stat & 0xff0000) 106 - ret |= POLLIN | POLLRDNORM; 106 + ret |= EPOLLIN | EPOLLRDNORM; 107 107 else { 108 108 ctx->csa.priv1.int_stat_class2_RW &= 109 109 ~CLASS2_MAILBOX_INTR; ··· 111 111 CLASS2_ENABLE_MAILBOX_INTR; 112 112 } 113 113 } 114 - if (events & (POLLOUT | POLLWRNORM)) { 114 + if (events & (EPOLLOUT | EPOLLWRNORM)) { 115 115 if (stat & 0x00ff00) 116 - ret = POLLOUT | POLLWRNORM; 116 + ret = EPOLLOUT | EPOLLWRNORM; 117 117 else { 118 118 ctx->csa.priv1.int_stat_class2_RW &= 119 119 ~CLASS2_MAILBOX_THRESHOLD_INTR;
+5 -5
arch/powerpc/platforms/cell/spufs/file.c
··· 774 774 * that poll should not sleep. Will be fixed later. 775 775 */ 776 776 mutex_lock(&ctx->state_mutex); 777 - mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM); 777 + mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM); 778 778 spu_release(ctx); 779 779 780 780 return mask; ··· 910 910 * that poll should not sleep. Will be fixed later. 911 911 */ 912 912 mutex_lock(&ctx->state_mutex); 913 - mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM); 913 + mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM); 914 914 spu_release(ctx); 915 915 916 916 return mask; ··· 1710 1710 1711 1711 mask = 0; 1712 1712 if (free_elements & 0xffff) 1713 - mask |= POLLOUT | POLLWRNORM; 1713 + mask |= EPOLLOUT | EPOLLWRNORM; 1714 1714 if (tagstatus & ctx->tagwait) 1715 - mask |= POLLIN | POLLRDNORM; 1715 + mask |= EPOLLIN | EPOLLRDNORM; 1716 1716 1717 1717 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__, 1718 1718 free_elements, tagstatus, ctx->tagwait); ··· 2469 2469 return rc; 2470 2470 2471 2471 if (spufs_switch_log_used(ctx) > 0) 2472 - mask |= POLLIN; 2472 + mask |= EPOLLIN; 2473 2473 2474 2474 spu_release(ctx); 2475 2475
+4 -4
arch/powerpc/platforms/cell/spufs/hw_ops.c
··· 70 70 but first mark any pending interrupts as done so 71 71 we don't get woken up unnecessarily */ 72 72 73 - if (events & (POLLIN | POLLRDNORM)) { 73 + if (events & (EPOLLIN | EPOLLRDNORM)) { 74 74 if (stat & 0xff0000) 75 - ret |= POLLIN | POLLRDNORM; 75 + ret |= EPOLLIN | EPOLLRDNORM; 76 76 else { 77 77 spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR); 78 78 spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); 79 79 } 80 80 } 81 - if (events & (POLLOUT | POLLWRNORM)) { 81 + if (events & (EPOLLOUT | EPOLLWRNORM)) { 82 82 if (stat & 0x00ff00) 83 - ret = POLLOUT | POLLWRNORM; 83 + ret = EPOLLOUT | EPOLLWRNORM; 84 84 else { 85 85 spu_int_stat_clear(spu, 2, 86 86 CLASS2_MAILBOX_THRESHOLD_INTR);
+1 -1
arch/powerpc/platforms/powernv/opal-prd.c
··· 153 153 poll_wait(file, &opal_prd_msg_wait, wait); 154 154 155 155 if (!opal_msg_queue_empty()) 156 - return POLLIN | POLLRDNORM; 156 + return EPOLLIN | EPOLLRDNORM; 157 157 158 158 return 0; 159 159 }
+1 -1
arch/x86/kernel/apm_32.c
··· 1515 1515 return 0; 1516 1516 poll_wait(fp, &apm_waitqueue, wait); 1517 1517 if (!queue_empty(as)) 1518 - return POLLIN | POLLRDNORM; 1518 + return EPOLLIN | EPOLLRDNORM; 1519 1519 return 0; 1520 1520 } 1521 1521
+2 -2
arch/x86/kernel/cpu/mcheck/dev-mcelog.c
··· 247 247 { 248 248 poll_wait(file, &mce_chrdev_wait, wait); 249 249 if (READ_ONCE(mcelog.next)) 250 - return POLLIN | POLLRDNORM; 250 + return EPOLLIN | EPOLLRDNORM; 251 251 if (!mce_apei_read_done && apei_check_mce()) 252 - return POLLIN | POLLRDNORM; 252 + return EPOLLIN | EPOLLRDNORM; 253 253 return 0; 254 254 } 255 255
+2 -2
block/bsg.c
··· 849 849 850 850 spin_lock_irq(&bd->lock); 851 851 if (!list_empty(&bd->done_list)) 852 - mask |= POLLIN | POLLRDNORM; 852 + mask |= EPOLLIN | EPOLLRDNORM; 853 853 if (bd->queued_cmds < bd->max_queue) 854 - mask |= POLLOUT; 854 + mask |= EPOLLOUT; 855 855 spin_unlock_irq(&bd->lock); 856 856 857 857 return mask;
+8 -8
crypto/af_alg.c
··· 735 735 rcu_read_lock(); 736 736 wq = rcu_dereference(sk->sk_wq); 737 737 if (skwq_has_sleeper(wq)) 738 - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 739 - POLLRDNORM | 740 - POLLRDBAND); 738 + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 739 + EPOLLRDNORM | 740 + EPOLLRDBAND); 741 741 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 742 742 rcu_read_unlock(); 743 743 } ··· 800 800 rcu_read_lock(); 801 801 wq = rcu_dereference(sk->sk_wq); 802 802 if (skwq_has_sleeper(wq)) 803 - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 804 - POLLRDNORM | 805 - POLLRDBAND); 803 + wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 804 + EPOLLRDNORM | 805 + EPOLLRDBAND); 806 806 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 807 807 rcu_read_unlock(); 808 808 } ··· 1076 1076 mask = 0; 1077 1077 1078 1078 if (!ctx->more || ctx->used) 1079 - mask |= POLLIN | POLLRDNORM; 1079 + mask |= EPOLLIN | EPOLLRDNORM; 1080 1080 1081 1081 if (af_alg_writable(sk)) 1082 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1082 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 1083 1083 1084 1084 return mask; 1085 1085 }
+2 -2
drivers/acpi/acpi_dbg.c
··· 724 724 725 725 poll_wait(file, &acpi_aml_io.wait, wait); 726 726 if (acpi_aml_user_readable()) 727 - masks |= POLLIN | POLLRDNORM; 727 + masks |= EPOLLIN | EPOLLRDNORM; 728 728 if (acpi_aml_user_writable()) 729 - masks |= POLLOUT | POLLWRNORM; 729 + masks |= EPOLLOUT | EPOLLWRNORM; 730 730 731 731 return masks; 732 732 }
+2 -2
drivers/android/binder.c
··· 4371 4371 */ 4372 4372 if ((thread->looper & BINDER_LOOPER_STATE_POLL) && 4373 4373 waitqueue_active(&thread->wait)) { 4374 - wake_up_poll(&thread->wait, POLLHUP | POLLFREE); 4374 + wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); 4375 4375 } 4376 4376 4377 4377 binder_inner_proc_unlock(thread->proc); ··· 4401 4401 poll_wait(filp, &thread->wait, wait); 4402 4402 4403 4403 if (binder_has_work(thread, wait_for_proc_work)) 4404 - return POLLIN; 4404 + return EPOLLIN; 4405 4405 4406 4406 return 0; 4407 4407 }
+2 -2
drivers/bluetooth/hci_vhci.c
··· 306 306 poll_wait(file, &data->read_wait, wait); 307 307 308 308 if (!skb_queue_empty(&data->readq)) 309 - return POLLIN | POLLRDNORM; 309 + return EPOLLIN | EPOLLRDNORM; 310 310 311 - return POLLOUT | POLLWRNORM; 311 + return EPOLLOUT | EPOLLWRNORM; 312 312 } 313 313 314 314 static void vhci_open_timeout(struct work_struct *work)
+1 -1
drivers/char/apm-emulation.c
··· 241 241 struct apm_user *as = fp->private_data; 242 242 243 243 poll_wait(fp, &apm_waitqueue, wait); 244 - return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM; 244 + return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM; 245 245 } 246 246 247 247 /*
+1 -1
drivers/char/dsp56k.c
··· 414 414 { 415 415 case DSP56K_DEV_56001: 416 416 /* poll_wait(file, ???, wait); */ 417 - return POLLIN | POLLRDNORM | POLLOUT; 417 + return EPOLLIN | EPOLLRDNORM | EPOLLOUT; 418 418 419 419 default: 420 420 printk("DSP56k driver: Unknown minor device: %d\n", dev);
+3 -3
drivers/char/dtlk.c
··· 62 62 #include <linux/uaccess.h> /* for get_user, etc. */ 63 63 #include <linux/wait.h> /* for wait_queue */ 64 64 #include <linux/init.h> /* for __init, module_{init,exit} */ 65 - #include <linux/poll.h> /* for POLLIN, etc. */ 65 + #include <linux/poll.h> /* for EPOLLIN, etc. */ 66 66 #include <linux/dtlk.h> /* local header file for DoubleTalk values */ 67 67 68 68 #ifdef TRACING ··· 244 244 245 245 if (dtlk_has_indexing && dtlk_readable()) { 246 246 del_timer(&dtlk_timer); 247 - mask = POLLIN | POLLRDNORM; 247 + mask = EPOLLIN | EPOLLRDNORM; 248 248 } 249 249 if (dtlk_writeable()) { 250 250 del_timer(&dtlk_timer); 251 - mask |= POLLOUT | POLLWRNORM; 251 + mask |= EPOLLOUT | EPOLLWRNORM; 252 252 } 253 253 /* there are no exception conditions */ 254 254
+1 -1
drivers/char/hpet.c
··· 359 359 spin_unlock_irq(&hpet_lock); 360 360 361 361 if (v != 0) 362 - return POLLIN | POLLRDNORM; 362 + return EPOLLIN | EPOLLRDNORM; 363 363 364 364 return 0; 365 365 }
+2 -2
drivers/char/ipmi/bt-bmc.c
··· 349 349 ctrl = bt_inb(bt_bmc, BT_CTRL); 350 350 351 351 if (ctrl & BT_CTRL_H2B_ATN) 352 - mask |= POLLIN; 352 + mask |= EPOLLIN; 353 353 354 354 if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) 355 - mask |= POLLOUT; 355 + mask |= EPOLLOUT; 356 356 357 357 return mask; 358 358 }
+1 -1
drivers/char/ipmi/ipmi_devintf.c
··· 89 89 spin_lock_irqsave(&priv->recv_msg_lock, flags); 90 90 91 91 if (!list_empty(&(priv->recv_msgs))) 92 - mask |= (POLLIN | POLLRDNORM); 92 + mask |= (EPOLLIN | EPOLLRDNORM); 93 93 94 94 spin_unlock_irqrestore(&priv->recv_msg_lock, flags); 95 95
+1 -1
drivers/char/ipmi/ipmi_watchdog.c
··· 895 895 896 896 spin_lock(&ipmi_read_lock); 897 897 if (data_to_read) 898 - mask |= (POLLIN | POLLRDNORM); 898 + mask |= (EPOLLIN | EPOLLRDNORM); 899 899 spin_unlock(&ipmi_read_lock); 900 900 901 901 return mask;
+2 -2
drivers/char/pcmcia/cm4040_cs.c
··· 423 423 poll_wait(filp, &dev->poll_wait, wait); 424 424 425 425 if (test_and_clear_bit(BS_READABLE, &dev->buffer_status)) 426 - mask |= POLLIN | POLLRDNORM; 426 + mask |= EPOLLIN | EPOLLRDNORM; 427 427 if (test_and_clear_bit(BS_WRITABLE, &dev->buffer_status)) 428 - mask |= POLLOUT | POLLWRNORM; 428 + mask |= EPOLLOUT | EPOLLWRNORM; 429 429 430 430 DEBUGP(2, dev, "<- cm4040_poll(%u)\n", mask); 431 431
+1 -1
drivers/char/ppdev.c
··· 776 776 777 777 poll_wait(file, &pp->irq_wait, wait); 778 778 if (atomic_read(&pp->irqc)) 779 - mask |= POLLIN | POLLRDNORM; 779 + mask |= EPOLLIN | EPOLLRDNORM; 780 780 781 781 return mask; 782 782 }
+2 -2
drivers/char/random.c
··· 1793 1793 poll_wait(file, &random_write_wait, wait); 1794 1794 mask = 0; 1795 1795 if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits) 1796 - mask |= POLLIN | POLLRDNORM; 1796 + mask |= EPOLLIN | EPOLLRDNORM; 1797 1797 if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) 1798 - mask |= POLLOUT | POLLWRNORM; 1798 + mask |= EPOLLOUT | EPOLLWRNORM; 1799 1799 return mask; 1800 1800 } 1801 1801
+1 -1
drivers/char/rtc.c
··· 804 804 spin_unlock_irq(&rtc_lock); 805 805 806 806 if (l != 0) 807 - return POLLIN | POLLRDNORM; 807 + return EPOLLIN | EPOLLRDNORM; 808 808 return 0; 809 809 } 810 810 #endif
+2 -2
drivers/char/snsc.c
··· 340 340 341 341 if (status > 0) { 342 342 if (status & SAL_IROUTER_INTR_RECV) { 343 - mask |= POLLIN | POLLRDNORM; 343 + mask |= EPOLLIN | EPOLLRDNORM; 344 344 } 345 345 if (status & SAL_IROUTER_INTR_XMIT) { 346 - mask |= POLLOUT | POLLWRNORM; 346 + mask |= EPOLLOUT | EPOLLWRNORM; 347 347 } 348 348 } 349 349
+1 -1
drivers/char/sonypi.c
··· 944 944 { 945 945 poll_wait(file, &sonypi_device.fifo_proc_list, wait); 946 946 if (kfifo_len(&sonypi_device.fifo)) 947 - return POLLIN | POLLRDNORM; 947 + return EPOLLIN | EPOLLRDNORM; 948 948 return 0; 949 949 } 950 950
+3 -3
drivers/char/tpm/tpm_vtpm_proxy.c
··· 180 180 181 181 poll_wait(filp, &proxy_dev->wq, wait); 182 182 183 - ret = POLLOUT; 183 + ret = EPOLLOUT; 184 184 185 185 mutex_lock(&proxy_dev->buf_lock); 186 186 187 187 if (proxy_dev->req_len) 188 - ret |= POLLIN | POLLRDNORM; 188 + ret |= EPOLLIN | EPOLLRDNORM; 189 189 190 190 if (!(proxy_dev->state & STATE_OPENED_FLAG)) 191 - ret |= POLLHUP; 191 + ret |= EPOLLHUP; 192 192 193 193 mutex_unlock(&proxy_dev->buf_lock); 194 194
+4 -4
drivers/char/virtio_console.c
··· 992 992 993 993 if (!port->guest_connected) { 994 994 /* Port got unplugged */ 995 - return POLLHUP; 995 + return EPOLLHUP; 996 996 } 997 997 ret = 0; 998 998 if (!will_read_block(port)) 999 - ret |= POLLIN | POLLRDNORM; 999 + ret |= EPOLLIN | EPOLLRDNORM; 1000 1000 if (!will_write_block(port)) 1001 - ret |= POLLOUT; 1001 + ret |= EPOLLOUT; 1002 1002 if (!port->host_connected) 1003 - ret |= POLLHUP; 1003 + ret |= EPOLLHUP; 1004 1004 1005 1005 return ret; 1006 1006 }
+6 -6
drivers/char/xillybus/xillybus_core.c
··· 1758 1758 1759 1759 spin_lock_irqsave(&channel->wr_spinlock, flags); 1760 1760 if (!channel->wr_empty || channel->wr_ready) 1761 - mask |= POLLIN | POLLRDNORM; 1761 + mask |= EPOLLIN | EPOLLRDNORM; 1762 1762 1763 1763 if (channel->wr_hangup) 1764 1764 /* 1765 - * Not POLLHUP, because its behavior is in the 1766 - * mist, and POLLIN does what we want: Wake up 1765 + * Not EPOLLHUP, because its behavior is in the 1766 + * mist, and EPOLLIN does what we want: Wake up 1767 1767 * the read file descriptor so it sees EOF. 1768 1768 */ 1769 - mask |= POLLIN | POLLRDNORM; 1769 + mask |= EPOLLIN | EPOLLRDNORM; 1770 1770 spin_unlock_irqrestore(&channel->wr_spinlock, flags); 1771 1771 } 1772 1772 ··· 1781 1781 1782 1782 spin_lock_irqsave(&channel->rd_spinlock, flags); 1783 1783 if (!channel->rd_full) 1784 - mask |= POLLOUT | POLLWRNORM; 1784 + mask |= EPOLLOUT | EPOLLWRNORM; 1785 1785 spin_unlock_irqrestore(&channel->rd_spinlock, flags); 1786 1786 } 1787 1787 1788 1788 if (channel->endpoint->fatal_error) 1789 - mask |= POLLERR; 1789 + mask |= EPOLLERR; 1790 1790 1791 1791 return mask; 1792 1792 }
+13 -13
drivers/dma-buf/dma-buf.c
··· 135 135 * Userspace can query the state of these implicitly tracked fences using poll() 136 136 * and related system calls: 137 137 * 138 - * - Checking for POLLIN, i.e. read access, can be use to query the state of the 138 + * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the 139 139 * most recent write or exclusive fence. 140 140 * 141 - * - Checking for POLLOUT, i.e. write access, can be used to query the state of 141 + * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of 142 142 * all attached fences, shared and exclusive ones. 143 143 * 144 144 * Note that this only signals the completion of the respective fences, i.e. the ··· 168 168 169 169 dmabuf = file->private_data; 170 170 if (!dmabuf || !dmabuf->resv) 171 - return POLLERR; 171 + return EPOLLERR; 172 172 173 173 resv = dmabuf->resv; 174 174 175 175 poll_wait(file, &dmabuf->poll, poll); 176 176 177 - events = poll_requested_events(poll) & (POLLIN | POLLOUT); 177 + events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT); 178 178 if (!events) 179 179 return 0; 180 180 ··· 193 193 goto retry; 194 194 } 195 195 196 - if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) { 196 + if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) { 197 197 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; 198 - __poll_t pevents = POLLIN; 198 + __poll_t pevents = EPOLLIN; 199 199 200 200 if (shared_count == 0) 201 - pevents |= POLLOUT; 201 + pevents |= EPOLLOUT; 202 202 203 203 spin_lock_irq(&dmabuf->poll.lock); 204 204 if (dcb->active) { ··· 228 228 } 229 229 } 230 230 231 - if ((events & POLLOUT) && shared_count > 0) { 231 + if ((events & EPOLLOUT) && shared_count > 0) { 232 232 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; 233 233 int i; 234 234 235 235 /* Only queue a new callback if no event has fired yet */ 236 236 spin_lock_irq(&dmabuf->poll.lock); 237 237 if (dcb->active) 238 - events &= ~POLLOUT; 238 + events &= ~EPOLLOUT; 239 239 
else 240 - dcb->active = POLLOUT; 240 + dcb->active = EPOLLOUT; 241 241 spin_unlock_irq(&dmabuf->poll.lock); 242 242 243 - if (!(events & POLLOUT)) 243 + if (!(events & EPOLLOUT)) 244 244 goto out; 245 245 246 246 for (i = 0; i < shared_count; ++i) { ··· 253 253 * 254 254 * call dma_buf_poll_cb and force a recheck! 255 255 */ 256 - events &= ~POLLOUT; 256 + events &= ~EPOLLOUT; 257 257 dma_buf_poll_cb(NULL, &dcb->cb); 258 258 break; 259 259 } 260 260 if (!dma_fence_add_callback(fence, &dcb->cb, 261 261 dma_buf_poll_cb)) { 262 262 dma_fence_put(fence); 263 - events &= ~POLLOUT; 263 + events &= ~EPOLLOUT; 264 264 break; 265 265 } 266 266 dma_fence_put(fence);
+1 -1
drivers/dma-buf/sync_file.c
··· 325 325 wake_up_all(&sync_file->wq); 326 326 } 327 327 328 - return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0; 328 + return dma_fence_is_signaled(sync_file->fence) ? EPOLLIN : 0; 329 329 } 330 330 331 331 static long sync_file_ioctl_merge(struct sync_file *sync_file,
+2 -2
drivers/firewire/core-cdev.c
··· 1792 1792 poll_wait(file, &client->wait, pt); 1793 1793 1794 1794 if (fw_device_is_shutdown(client->device)) 1795 - mask |= POLLHUP | POLLERR; 1795 + mask |= EPOLLHUP | EPOLLERR; 1796 1796 if (!list_empty(&client->event_list)) 1797 - mask |= POLLIN | POLLRDNORM; 1797 + mask |= EPOLLIN | EPOLLRDNORM; 1798 1798 1799 1799 return mask; 1800 1800 }
+2 -2
drivers/firewire/nosy.c
··· 337 337 poll_wait(file, &client->buffer.wait, pt); 338 338 339 339 if (atomic_read(&client->buffer.size) > 0) 340 - ret = POLLIN | POLLRDNORM; 340 + ret = EPOLLIN | EPOLLRDNORM; 341 341 342 342 if (list_empty(&client->lynx->link)) 343 - ret |= POLLHUP; 343 + ret |= EPOLLHUP; 344 344 345 345 return ret; 346 346 }
+2 -2
drivers/gpio/gpiolib.c
··· 630 630 poll_wait(filep, &le->wait, wait); 631 631 632 632 if (!kfifo_is_empty(&le->events)) 633 - events = POLLIN | POLLRDNORM; 633 + events = EPOLLIN | EPOLLRDNORM; 634 634 635 635 return events; 636 636 } ··· 775 775 776 776 ret = kfifo_put(&le->events, ge); 777 777 if (ret != 0) 778 - wake_up_poll(&le->wait, POLLIN); 778 + wake_up_poll(&le->wait, EPOLLIN); 779 779 780 780 return IRQ_HANDLED; 781 781 }
+1 -1
drivers/gpu/drm/drm_file.c
··· 567 567 poll_wait(filp, &file_priv->event_wait, wait); 568 568 569 569 if (!list_empty(&file_priv->event_list)) 570 - mask |= POLLIN | POLLRDNORM; 570 + mask |= EPOLLIN | EPOLLRDNORM; 571 571 572 572 return mask; 573 573 }
+5 -5
drivers/gpu/drm/i915/i915_perf.c
··· 244 244 * The two separate pointers let us decouple read()s from tail pointer aging. 245 245 * 246 246 * The tail pointers are checked and updated at a limited rate within a hrtimer 247 - * callback (the same callback that is used for delivering POLLIN events) 247 + * callback (the same callback that is used for delivering EPOLLIN events) 248 248 * 249 249 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which 250 250 * indicates that an updated tail pointer is needed. ··· 2292 2292 mutex_unlock(&dev_priv->perf.lock); 2293 2293 } 2294 2294 2295 - /* We allow the poll checking to sometimes report false positive POLLIN 2295 + /* We allow the poll checking to sometimes report false positive EPOLLIN 2296 2296 * events where we might actually report EAGAIN on read() if there's 2297 2297 * not really any data available. In this situation though we don't 2298 - * want to enter a busy loop between poll() reporting a POLLIN event 2298 + * want to enter a busy loop between poll() reporting a EPOLLIN event 2299 2299 * and read() returning -EAGAIN. Clearing the oa.pollin state here 2300 2300 * effectively ensures we back off until the next hrtimer callback 2301 - * before reporting another POLLIN event. 2301 + * before reporting another EPOLLIN event. 2302 2302 */ 2303 2303 if (ret >= 0 || ret == -EAGAIN) { 2304 2304 /* Maybe make ->pollin per-stream state if we support multiple ··· 2358 2358 * samples to read. 2359 2359 */ 2360 2360 if (dev_priv->perf.oa.pollin) 2361 - events |= POLLIN; 2361 + events |= EPOLLIN; 2362 2362 2363 2363 return events; 2364 2364 }
+1 -1
drivers/gpu/vga/vgaarb.c
··· 1271 1271 pr_debug("%s\n", __func__); 1272 1272 1273 1273 poll_wait(file, &vga_wait_queue, wait); 1274 - return POLLIN; 1274 + return EPOLLIN; 1275 1275 } 1276 1276 1277 1277 static int vga_arb_open(struct inode *inode, struct file *file)
+2 -2
drivers/hid/hid-debug.c
··· 1185 1185 1186 1186 poll_wait(file, &list->hdev->debug_wait, wait); 1187 1187 if (list->head != list->tail) 1188 - return POLLIN | POLLRDNORM; 1188 + return EPOLLIN | EPOLLRDNORM; 1189 1189 if (!list->hdev->debug) 1190 - return POLLERR | POLLHUP; 1190 + return EPOLLERR | EPOLLHUP; 1191 1191 return 0; 1192 1192 } 1193 1193
+2 -2
drivers/hid/hid-roccat.c
··· 142 142 struct roccat_reader *reader = file->private_data; 143 143 poll_wait(file, &reader->device->wait, wait); 144 144 if (reader->cbuf_start != reader->device->cbuf_end) 145 - return POLLIN | POLLRDNORM; 145 + return EPOLLIN | EPOLLRDNORM; 146 146 if (!reader->device->exist) 147 - return POLLERR | POLLHUP; 147 + return EPOLLERR | EPOLLHUP; 148 148 return 0; 149 149 } 150 150
+1 -1
drivers/hid/hid-sensor-custom.c
··· 714 714 poll_wait(file, &sensor_inst->wait, wait); 715 715 716 716 if (!kfifo_is_empty(&sensor_inst->data_fifo)) 717 - mask = POLLIN | POLLRDNORM; 717 + mask = EPOLLIN | EPOLLRDNORM; 718 718 719 719 return mask; 720 720 }
+2 -2
drivers/hid/hidraw.c
··· 255 255 256 256 poll_wait(file, &list->hidraw->wait, wait); 257 257 if (list->head != list->tail) 258 - return POLLIN | POLLRDNORM; 258 + return EPOLLIN | EPOLLRDNORM; 259 259 if (!list->hidraw->exist) 260 - return POLLERR | POLLHUP; 260 + return EPOLLERR | EPOLLHUP; 261 261 return 0; 262 262 } 263 263
+1 -1
drivers/hid/uhid.c
··· 760 760 poll_wait(file, &uhid->waitq, wait); 761 761 762 762 if (uhid->head != uhid->tail) 763 - return POLLIN | POLLRDNORM; 763 + return EPOLLIN | EPOLLRDNORM; 764 764 765 765 return 0; 766 766 }
+2 -2
drivers/hid/usbhid/hiddev.c
··· 428 428 429 429 poll_wait(file, &list->hiddev->wait, wait); 430 430 if (list->head != list->tail) 431 - return POLLIN | POLLRDNORM; 431 + return EPOLLIN | EPOLLRDNORM; 432 432 if (!list->hiddev->exist) 433 - return POLLERR | POLLHUP; 433 + return EPOLLERR | EPOLLHUP; 434 434 return 0; 435 435 } 436 436
+2 -2
drivers/hsi/clients/cmt_speech.c
··· 1132 1132 poll_wait(file, &cs_char_data.wait, wait); 1133 1133 spin_lock_bh(&csdata->lock); 1134 1134 if (!list_empty(&csdata->chardev_queue)) 1135 - ret = POLLIN | POLLRDNORM; 1135 + ret = EPOLLIN | EPOLLRDNORM; 1136 1136 else if (!list_empty(&csdata->dataind_queue)) 1137 - ret = POLLIN | POLLRDNORM; 1137 + ret = EPOLLIN | EPOLLRDNORM; 1138 1138 spin_unlock_bh(&csdata->lock); 1139 1139 1140 1140 return ret;
+2 -2
drivers/hv/hv_utils_transport.c
··· 113 113 poll_wait(file, &hvt->outmsg_q, wait); 114 114 115 115 if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) 116 - return POLLERR | POLLHUP; 116 + return EPOLLERR | EPOLLHUP; 117 117 118 118 if (hvt->outmsg_len > 0) 119 - return POLLIN | POLLRDNORM; 119 + return EPOLLIN | EPOLLRDNORM; 120 120 121 121 return 0; 122 122 }
+2 -2
drivers/iio/buffer/industrialio-buffer-dma.c
··· 222 222 spin_unlock_irqrestore(&queue->list_lock, flags); 223 223 224 224 iio_buffer_block_put_atomic(block); 225 - wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); 225 + wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); 226 226 } 227 227 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done); 228 228 ··· 251 251 } 252 252 spin_unlock_irqrestore(&queue->list_lock, flags); 253 253 254 - wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); 254 + wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); 255 255 } 256 256 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort); 257 257
+3 -3
drivers/iio/industrialio-buffer.c
··· 166 166 * @wait: Poll table structure pointer for which the driver adds 167 167 * a wait queue 168 168 * 169 - * Return: (POLLIN | POLLRDNORM) if data is available for reading 169 + * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading 170 170 * or 0 for other cases 171 171 */ 172 172 __poll_t iio_buffer_poll(struct file *filp, ··· 180 180 181 181 poll_wait(filp, &rb->pollq, wait); 182 182 if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0)) 183 - return POLLIN | POLLRDNORM; 183 + return EPOLLIN | EPOLLRDNORM; 184 184 return 0; 185 185 } 186 186 ··· 1396 1396 * We can't just test for watermark to decide if we wake the poll queue 1397 1397 * because read may request less samples than the watermark. 1398 1398 */ 1399 - wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM); 1399 + wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); 1400 1400 return 0; 1401 1401 } 1402 1402
+3 -3
drivers/iio/industrialio-event.c
··· 80 80 81 81 copied = kfifo_put(&ev_int->det_events, ev); 82 82 if (copied != 0) 83 - wake_up_poll(&ev_int->wait, POLLIN); 83 + wake_up_poll(&ev_int->wait, EPOLLIN); 84 84 } 85 85 86 86 return 0; ··· 92 92 * @filep: File structure pointer to identify the device 93 93 * @wait: Poll table pointer to add the wait queue on 94 94 * 95 - * Return: (POLLIN | POLLRDNORM) if data is available for reading 95 + * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading 96 96 * or a negative error code on failure 97 97 */ 98 98 static __poll_t iio_event_poll(struct file *filep, ··· 108 108 poll_wait(filep, &ev_int->wait, wait); 109 109 110 110 if (!kfifo_is_empty(&ev_int->det_events)) 111 - events = POLLIN | POLLRDNORM; 111 + events = EPOLLIN | EPOLLRDNORM; 112 112 113 113 return events; 114 114 }
+1 -1
drivers/infiniband/core/ucm.c
··· 1144 1144 poll_wait(filp, &file->poll_wait, wait); 1145 1145 1146 1146 if (!list_empty(&file->events)) 1147 - mask = POLLIN | POLLRDNORM; 1147 + mask = EPOLLIN | EPOLLRDNORM; 1148 1148 1149 1149 return mask; 1150 1150 }
+1 -1
drivers/infiniband/core/ucma.c
··· 1639 1639 poll_wait(filp, &file->poll_wait, wait); 1640 1640 1641 1641 if (!list_empty(&file->event_list)) 1642 - mask = POLLIN | POLLRDNORM; 1642 + mask = EPOLLIN | EPOLLRDNORM; 1643 1643 1644 1644 return mask; 1645 1645 }
+2 -2
drivers/infiniband/core/user_mad.c
··· 633 633 struct ib_umad_file *file = filp->private_data; 634 634 635 635 /* we will always be able to post a MAD send */ 636 - __poll_t mask = POLLOUT | POLLWRNORM; 636 + __poll_t mask = EPOLLOUT | EPOLLWRNORM; 637 637 638 638 poll_wait(filp, &file->recv_wait, wait); 639 639 640 640 if (!list_empty(&file->recv_list)) 641 - mask |= POLLIN | POLLRDNORM; 641 + mask |= EPOLLIN | EPOLLRDNORM; 642 642 643 643 return mask; 644 644 }
+1 -1
drivers/infiniband/core/uverbs_main.c
··· 351 351 352 352 spin_lock_irq(&ev_queue->lock); 353 353 if (!list_empty(&ev_queue->event_list)) 354 - pollflags = POLLIN | POLLRDNORM; 354 + pollflags = EPOLLIN | EPOLLRDNORM; 355 355 spin_unlock_irq(&ev_queue->lock); 356 356 357 357 return pollflags;
+4 -4
drivers/infiniband/hw/hfi1/file_ops.c
··· 612 612 613 613 uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt; 614 614 if (!uctxt) 615 - pollflag = POLLERR; 615 + pollflag = EPOLLERR; 616 616 else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT) 617 617 pollflag = poll_urgent(fp, pt); 618 618 else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV) 619 619 pollflag = poll_next(fp, pt); 620 620 else /* invalid */ 621 - pollflag = POLLERR; 621 + pollflag = EPOLLERR; 622 622 623 623 return pollflag; 624 624 } ··· 1435 1435 1436 1436 spin_lock_irq(&dd->uctxt_lock); 1437 1437 if (uctxt->urgent != uctxt->urgent_poll) { 1438 - pollflag = POLLIN | POLLRDNORM; 1438 + pollflag = EPOLLIN | EPOLLRDNORM; 1439 1439 uctxt->urgent_poll = uctxt->urgent; 1440 1440 } else { 1441 1441 pollflag = 0; ··· 1462 1462 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt); 1463 1463 pollflag = 0; 1464 1464 } else { 1465 - pollflag = POLLIN | POLLRDNORM; 1465 + pollflag = EPOLLIN | EPOLLRDNORM; 1466 1466 } 1467 1467 spin_unlock_irq(&dd->uctxt_lock); 1468 1468
+4 -4
drivers/infiniband/hw/qib/qib_file_ops.c
··· 1085 1085 1086 1086 spin_lock_irq(&dd->uctxt_lock); 1087 1087 if (rcd->urgent != rcd->urgent_poll) { 1088 - pollflag = POLLIN | POLLRDNORM; 1088 + pollflag = EPOLLIN | EPOLLRDNORM; 1089 1089 rcd->urgent_poll = rcd->urgent; 1090 1090 } else { 1091 1091 pollflag = 0; ··· 1111 1111 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); 1112 1112 pollflag = 0; 1113 1113 } else 1114 - pollflag = POLLIN | POLLRDNORM; 1114 + pollflag = EPOLLIN | EPOLLRDNORM; 1115 1115 spin_unlock_irq(&dd->uctxt_lock); 1116 1116 1117 1117 return pollflag; ··· 1124 1124 1125 1125 rcd = ctxt_fp(fp); 1126 1126 if (!rcd) 1127 - pollflag = POLLERR; 1127 + pollflag = EPOLLERR; 1128 1128 else if (rcd->poll_type == QIB_POLL_TYPE_URGENT) 1129 1129 pollflag = qib_poll_urgent(rcd, fp, pt); 1130 1130 else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV) 1131 1131 pollflag = qib_poll_next(rcd, fp, pt); 1132 1132 else /* invalid */ 1133 - pollflag = POLLERR; 1133 + pollflag = EPOLLERR; 1134 1134 1135 1135 return pollflag; 1136 1136 }
+1 -1
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 874 874 iser_info("iser conn %p rc = %d\n", iser_conn, rc); 875 875 876 876 if (rc > 0) 877 - return 1; /* success, this is the equivalent of POLLOUT */ 877 + return 1; /* success, this is the equivalent of EPOLLOUT */ 878 878 else if (!rc) 879 879 return 0; /* timeout */ 880 880 else
+3 -3
drivers/input/evdev.c
··· 650 650 poll_wait(file, &evdev->wait, wait); 651 651 652 652 if (evdev->exist && !client->revoked) 653 - mask = POLLOUT | POLLWRNORM; 653 + mask = EPOLLOUT | EPOLLWRNORM; 654 654 else 655 - mask = POLLHUP | POLLERR; 655 + mask = EPOLLHUP | EPOLLERR; 656 656 657 657 if (client->packet_head != client->tail) 658 - mask |= POLLIN | POLLRDNORM; 658 + mask |= EPOLLIN | EPOLLRDNORM; 659 659 660 660 return mask; 661 661 }
+1 -1
drivers/input/input.c
··· 1053 1053 poll_wait(file, &input_devices_poll_wait, wait); 1054 1054 if (file->f_version != input_devices_state) { 1055 1055 file->f_version = input_devices_state; 1056 - return POLLIN | POLLRDNORM; 1056 + return EPOLLIN | EPOLLRDNORM; 1057 1057 } 1058 1058 1059 1059 return 0;
+2 -2
drivers/input/joydev.c
··· 442 442 struct joydev *joydev = client->joydev; 443 443 444 444 poll_wait(file, &joydev->wait, wait); 445 - return (joydev_data_pending(client) ? (POLLIN | POLLRDNORM) : 0) | 446 - (joydev->exist ? 0 : (POLLHUP | POLLERR)); 445 + return (joydev_data_pending(client) ? (EPOLLIN | EPOLLRDNORM) : 0) | 446 + (joydev->exist ? 0 : (EPOLLHUP | EPOLLERR)); 447 447 } 448 448 449 449 static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
+1 -1
drivers/input/misc/hp_sdc_rtc.c
··· 414 414 415 415 l = 0; 416 416 if (l != 0) 417 - return POLLIN | POLLRDNORM; 417 + return EPOLLIN | EPOLLRDNORM; 418 418 return 0; 419 419 } 420 420
+1 -1
drivers/input/misc/uinput.c
··· 704 704 poll_wait(file, &udev->waitq, wait); 705 705 706 706 if (udev->head != udev->tail) 707 - return POLLIN | POLLRDNORM; 707 + return EPOLLIN | EPOLLRDNORM; 708 708 709 709 return 0; 710 710 }
+2 -2
drivers/input/mousedev.c
··· 765 765 766 766 poll_wait(file, &mousedev->wait, wait); 767 767 768 - mask = mousedev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR; 768 + mask = mousedev->exist ? EPOLLOUT | EPOLLWRNORM : EPOLLHUP | EPOLLERR; 769 769 if (client->ready || client->buffer) 770 - mask |= POLLIN | POLLRDNORM; 770 + mask |= EPOLLIN | EPOLLRDNORM; 771 771 772 772 return mask; 773 773 }
+2 -2
drivers/input/serio/serio_raw.c
··· 247 247 248 248 poll_wait(file, &serio_raw->wait, wait); 249 249 250 - mask = serio_raw->dead ? POLLHUP | POLLERR : POLLOUT | POLLWRNORM; 250 + mask = serio_raw->dead ? EPOLLHUP | EPOLLERR : EPOLLOUT | EPOLLWRNORM; 251 251 if (serio_raw->head != serio_raw->tail) 252 - mask |= POLLIN | POLLRDNORM; 252 + mask |= EPOLLIN | EPOLLRDNORM; 253 253 254 254 return mask; 255 255 }
+1 -1
drivers/input/serio/userio.c
··· 255 255 poll_wait(file, &userio->waitq, wait); 256 256 257 257 if (userio->head != userio->tail) 258 - return POLLIN | POLLRDNORM; 258 + return EPOLLIN | EPOLLRDNORM; 259 259 260 260 return 0; 261 261 }
+3 -3
drivers/isdn/capi/capi.c
··· 731 731 __poll_t mask = 0; 732 732 733 733 if (!cdev->ap.applid) 734 - return POLLERR; 734 + return EPOLLERR; 735 735 736 736 poll_wait(file, &(cdev->recvwait), wait); 737 - mask = POLLOUT | POLLWRNORM; 737 + mask = EPOLLOUT | EPOLLWRNORM; 738 738 if (!skb_queue_empty(&cdev->recvqueue)) 739 - mask |= POLLIN | POLLRDNORM; 739 + mask |= EPOLLIN | EPOLLRDNORM; 740 740 return mask; 741 741 } 742 742
+2 -2
drivers/isdn/divert/divert_procfs.c
··· 125 125 __poll_t mask = 0; 126 126 127 127 poll_wait(file, &(rd_queue), wait); 128 - /* mask = POLLOUT | POLLWRNORM; */ 128 + /* mask = EPOLLOUT | EPOLLWRNORM; */ 129 129 if (*((struct divert_info **) file->private_data)) { 130 - mask |= POLLIN | POLLRDNORM; 130 + mask |= EPOLLIN | EPOLLRDNORM; 131 131 } 132 132 return mask; 133 133 } /* isdn_divert_poll */
+2 -2
drivers/isdn/hardware/eicon/divamnt.c
··· 103 103 __poll_t mask = 0; 104 104 105 105 poll_wait(file, &msgwaitq, wait); 106 - mask = POLLOUT | POLLWRNORM; 106 + mask = EPOLLOUT | EPOLLWRNORM; 107 107 if (file->private_data || diva_dbg_q_length()) { 108 - mask |= POLLIN | POLLRDNORM; 108 + mask |= EPOLLIN | EPOLLRDNORM; 109 109 } 110 110 return (mask); 111 111 }
+5 -5
drivers/isdn/hardware/eicon/divasi.c
··· 370 370 diva_um_idi_os_context_t *p_os; 371 371 372 372 if (!file->private_data) { 373 - return (POLLERR); 373 + return (EPOLLERR); 374 374 } 375 375 376 376 if ((!(p_os = 377 377 (diva_um_idi_os_context_t *) 378 378 diva_um_id_get_os_context(file->private_data))) 379 379 || p_os->aborted) { 380 - return (POLLERR); 380 + return (EPOLLERR); 381 381 } 382 382 383 383 poll_wait(file, &p_os->read_wait, wait); 384 384 385 385 if (p_os->aborted) { 386 - return (POLLERR); 386 + return (EPOLLERR); 387 387 } 388 388 389 389 switch (diva_user_mode_idi_ind_ready(file->private_data, file)) { 390 390 case (-1): 391 - return (POLLERR); 391 + return (EPOLLERR); 392 392 393 393 case 0: 394 394 return (0); 395 395 } 396 396 397 - return (POLLIN | POLLRDNORM); 397 + return (EPOLLIN | EPOLLRDNORM); 398 398 } 399 399 400 400 static int um_idi_open(struct inode *inode, struct file *file)
+2 -2
drivers/isdn/hardware/eicon/divasmain.c
··· 653 653 static __poll_t divas_poll(struct file *file, poll_table *wait) 654 654 { 655 655 if (!file->private_data) { 656 - return (POLLERR); 656 + return (EPOLLERR); 657 657 } 658 - return (POLLIN | POLLRDNORM); 658 + return (EPOLLIN | EPOLLRDNORM); 659 659 } 660 660 661 661 static const struct file_operations divas_fops = {
+1 -1
drivers/isdn/hardware/eicon/divasproc.c
··· 101 101 102 102 static __poll_t divas_poll(struct file *file, poll_table *wait) 103 103 { 104 - return (POLLERR); 104 + return (EPOLLERR); 105 105 } 106 106 107 107 static int divas_open(struct inode *inode, struct file *file)
+1 -1
drivers/isdn/hysdn/hysdn_proclog.c
··· 294 294 poll_wait(file, &(pd->rd_queue), wait); 295 295 296 296 if (*((struct log_data **) file->private_data)) 297 - mask |= POLLIN | POLLRDNORM; 297 + mask |= EPOLLIN | EPOLLRDNORM; 298 298 299 299 return mask; 300 300 } /* hysdn_log_poll */
+6 -6
drivers/isdn/i4l/isdn_common.c
··· 1237 1237 mutex_lock(&isdn_mutex); 1238 1238 if (minor == ISDN_MINOR_STATUS) { 1239 1239 poll_wait(file, &(dev->info_waitq), wait); 1240 - /* mask = POLLOUT | POLLWRNORM; */ 1240 + /* mask = EPOLLOUT | EPOLLWRNORM; */ 1241 1241 if (file->private_data) { 1242 - mask |= POLLIN | POLLRDNORM; 1242 + mask |= EPOLLIN | EPOLLRDNORM; 1243 1243 } 1244 1244 goto out; 1245 1245 } 1246 1246 if (minor >= ISDN_MINOR_CTRL && minor <= ISDN_MINOR_CTRLMAX) { 1247 1247 if (drvidx < 0) { 1248 1248 /* driver deregistered while file open */ 1249 - mask = POLLHUP; 1249 + mask = EPOLLHUP; 1250 1250 goto out; 1251 1251 } 1252 1252 poll_wait(file, &(dev->drv[drvidx]->st_waitq), wait); 1253 - mask = POLLOUT | POLLWRNORM; 1253 + mask = EPOLLOUT | EPOLLWRNORM; 1254 1254 if (dev->drv[drvidx]->stavail) { 1255 - mask |= POLLIN | POLLRDNORM; 1255 + mask |= EPOLLIN | EPOLLRDNORM; 1256 1256 } 1257 1257 goto out; 1258 1258 } ··· 1262 1262 goto out; 1263 1263 } 1264 1264 #endif 1265 - mask = POLLERR; 1265 + mask = EPOLLERR; 1266 1266 out: 1267 1267 mutex_unlock(&isdn_mutex); 1268 1268 return mask;
+4 -4
drivers/isdn/i4l/isdn_ppp.c
··· 704 704 705 705 if (!(is->state & IPPP_OPEN)) { 706 706 if (is->state == IPPP_CLOSEWAIT) 707 - return POLLHUP; 707 + return EPOLLHUP; 708 708 printk(KERN_DEBUG "isdn_ppp: device not open\n"); 709 - return POLLERR; 709 + return EPOLLERR; 710 710 } 711 711 /* we're always ready to send .. */ 712 - mask = POLLOUT | POLLWRNORM; 712 + mask = EPOLLOUT | EPOLLWRNORM; 713 713 714 714 spin_lock_irqsave(&is->buflock, flags); 715 715 bl = is->last; ··· 719 719 */ 720 720 if (bf->next != bl || (is->state & IPPP_NOBLOCK)) { 721 721 is->state &= ~IPPP_NOBLOCK; 722 - mask |= POLLIN | POLLRDNORM; 722 + mask |= EPOLLIN | EPOLLRDNORM; 723 723 } 724 724 spin_unlock_irqrestore(&is->buflock, flags); 725 725 return mask;
+2 -2
drivers/isdn/mISDN/timerdev.c
··· 145 145 mISDN_poll(struct file *filep, poll_table *wait) 146 146 { 147 147 struct mISDNtimerdev *dev = filep->private_data; 148 - __poll_t mask = POLLERR; 148 + __poll_t mask = EPOLLERR; 149 149 150 150 if (*debug & DEBUG_TIMER) 151 151 printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait); ··· 153 153 poll_wait(filep, &dev->wait, wait); 154 154 mask = 0; 155 155 if (dev->work || !list_empty(&dev->expired)) 156 - mask |= (POLLIN | POLLRDNORM); 156 + mask |= (EPOLLIN | EPOLLRDNORM); 157 157 if (*debug & DEBUG_TIMER) 158 158 printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__, 159 159 dev->work, list_empty(&dev->expired));
+1 -1
drivers/leds/uleds.c
··· 183 183 poll_wait(file, &udev->waitq, wait); 184 184 185 185 if (udev->new_data) 186 - return POLLIN | POLLRDNORM; 186 + return EPOLLIN | EPOLLRDNORM; 187 187 188 188 return 0; 189 189 }
+1 -1
drivers/macintosh/smu.c
··· 1259 1259 1260 1260 spin_lock_irqsave(&pp->lock, flags); 1261 1261 if (pp->busy && pp->cmd.status != 1) 1262 - mask |= POLLIN; 1262 + mask |= EPOLLIN; 1263 1263 spin_unlock_irqrestore(&pp->lock, flags); 1264 1264 } 1265 1265 if (pp->mode == smu_file_events) {
+1 -1
drivers/macintosh/via-pmu.c
··· 2169 2169 poll_wait(filp, &pp->wait, wait); 2170 2170 spin_lock_irqsave(&pp->lock, flags); 2171 2171 if (pp->rb_get != pp->rb_put) 2172 - mask |= POLLIN; 2172 + mask |= EPOLLIN; 2173 2173 spin_unlock_irqrestore(&pp->lock, flags); 2174 2174 return mask; 2175 2175 }
+1 -1
drivers/mailbox/mailbox-test.c
··· 243 243 poll_wait(filp, &tdev->waitq, wait); 244 244 245 245 if (mbox_test_message_data_ready(tdev)) 246 - return POLLIN | POLLRDNORM; 246 + return EPOLLIN | EPOLLRDNORM; 247 247 return 0; 248 248 } 249 249
+1 -1
drivers/md/dm-ioctl.c
··· 1937 1937 poll_wait(filp, &dm_global_eventq, wait); 1938 1938 1939 1939 if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0) 1940 - mask |= POLLIN; 1940 + mask |= EPOLLIN; 1941 1941 1942 1942 return mask; 1943 1943 }
+3 -3
drivers/md/md.c
··· 7891 7891 __poll_t mask; 7892 7892 7893 7893 if (md_unloading) 7894 - return POLLIN|POLLRDNORM|POLLERR|POLLPRI; 7894 + return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 7895 7895 poll_wait(filp, &md_event_waiters, wait); 7896 7896 7897 7897 /* always allow read */ 7898 - mask = POLLIN | POLLRDNORM; 7898 + mask = EPOLLIN | EPOLLRDNORM; 7899 7899 7900 7900 if (seq->poll_event != atomic_read(&md_event_count)) 7901 - mask |= POLLERR | POLLPRI; 7901 + mask |= EPOLLERR | EPOLLPRI; 7902 7902 return mask; 7903 7903 } 7904 7904
+4 -4
drivers/media/cec/cec-api.c
··· 51 51 __poll_t res = 0; 52 52 53 53 if (!cec_is_registered(adap)) 54 - return POLLERR | POLLHUP; 54 + return EPOLLERR | EPOLLHUP; 55 55 mutex_lock(&adap->lock); 56 56 if (adap->is_configured && 57 57 adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ) 58 - res |= POLLOUT | POLLWRNORM; 58 + res |= EPOLLOUT | EPOLLWRNORM; 59 59 if (fh->queued_msgs) 60 - res |= POLLIN | POLLRDNORM; 60 + res |= EPOLLIN | EPOLLRDNORM; 61 61 if (fh->total_queued_events) 62 - res |= POLLPRI; 62 + res |= EPOLLPRI; 63 63 poll_wait(filp, &fh->wait, poll); 64 64 mutex_unlock(&adap->lock); 65 65 return res;
+3 -3
drivers/media/common/saa7146/saa7146_fops.c
··· 332 332 333 333 if (vdev->vfl_type == VFL_TYPE_VBI) { 334 334 if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT) 335 - return res | POLLOUT | POLLWRNORM; 335 + return res | EPOLLOUT | EPOLLWRNORM; 336 336 if( 0 == fh->vbi_q.streaming ) 337 337 return res | videobuf_poll_stream(file, &fh->vbi_q, wait); 338 338 q = &fh->vbi_q; ··· 346 346 347 347 if (!buf) { 348 348 DEB_D("buf == NULL!\n"); 349 - return res | POLLERR; 349 + return res | EPOLLERR; 350 350 } 351 351 352 352 poll_wait(file, &buf->done, wait); 353 353 if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) { 354 354 DEB_D("poll succeeded!\n"); 355 - return res | POLLIN | POLLRDNORM; 355 + return res | EPOLLIN | EPOLLRDNORM; 356 356 } 357 357 358 358 DEB_D("nothing to poll for, buf->state:%d\n", buf->state);
+1 -1
drivers/media/common/siano/smsdvb-debugfs.c
··· 371 371 rc = smsdvb_stats_wait_read(debug_data); 372 372 kref_put(&debug_data->refcount, smsdvb_debugfs_data_release); 373 373 374 - return rc > 0 ? POLLIN | POLLRDNORM : 0; 374 + return rc > 0 ? EPOLLIN | EPOLLRDNORM : 0; 375 375 } 376 376 377 377 static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf,
+15 -15
drivers/media/common/videobuf2/videobuf2-core.c
··· 2038 2038 struct vb2_buffer *vb = NULL; 2039 2039 unsigned long flags; 2040 2040 2041 - if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM))) 2041 + if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM))) 2042 2042 return 0; 2043 - if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM))) 2043 + if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM))) 2044 2044 return 0; 2045 2045 2046 2046 /* ··· 2048 2048 */ 2049 2049 if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) { 2050 2050 if (!q->is_output && (q->io_modes & VB2_READ) && 2051 - (req_events & (POLLIN | POLLRDNORM))) { 2051 + (req_events & (EPOLLIN | EPOLLRDNORM))) { 2052 2052 if (__vb2_init_fileio(q, 1)) 2053 - return POLLERR; 2053 + return EPOLLERR; 2054 2054 } 2055 2055 if (q->is_output && (q->io_modes & VB2_WRITE) && 2056 - (req_events & (POLLOUT | POLLWRNORM))) { 2056 + (req_events & (EPOLLOUT | EPOLLWRNORM))) { 2057 2057 if (__vb2_init_fileio(q, 0)) 2058 - return POLLERR; 2058 + return EPOLLERR; 2059 2059 /* 2060 2060 * Write to OUTPUT queue can be done immediately. 2061 2061 */ 2062 - return POLLOUT | POLLWRNORM; 2062 + return EPOLLOUT | EPOLLWRNORM; 2063 2063 } 2064 2064 } 2065 2065 ··· 2068 2068 * error flag is set. 2069 2069 */ 2070 2070 if (!vb2_is_streaming(q) || q->error) 2071 - return POLLERR; 2071 + return EPOLLERR; 2072 2072 2073 2073 /* 2074 2074 * If this quirk is set and QBUF hasn't been called yet then 2075 - * return POLLERR as well. This only affects capture queues, output 2075 + * return EPOLLERR as well. This only affects capture queues, output 2076 2076 * queues will always initialize waiting_for_buffers to false. 2077 2077 * This quirk is set by V4L2 for backwards compatibility reasons. 
2078 2078 */ 2079 2079 if (q->quirk_poll_must_check_waiting_for_buffers && 2080 - q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) 2081 - return POLLERR; 2080 + q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM))) 2081 + return EPOLLERR; 2082 2082 2083 2083 /* 2084 2084 * For output streams you can call write() as long as there are fewer 2085 2085 * buffers queued than there are buffers available. 2086 2086 */ 2087 2087 if (q->is_output && q->fileio && q->queued_count < q->num_buffers) 2088 - return POLLOUT | POLLWRNORM; 2088 + return EPOLLOUT | EPOLLWRNORM; 2089 2089 2090 2090 if (list_empty(&q->done_list)) { 2091 2091 /* ··· 2093 2093 * return immediately. DQBUF will return -EPIPE. 2094 2094 */ 2095 2095 if (q->last_buffer_dequeued) 2096 - return POLLIN | POLLRDNORM; 2096 + return EPOLLIN | EPOLLRDNORM; 2097 2097 2098 2098 poll_wait(file, &q->done_wq, wait); 2099 2099 } ··· 2110 2110 if (vb && (vb->state == VB2_BUF_STATE_DONE 2111 2111 || vb->state == VB2_BUF_STATE_ERROR)) { 2112 2112 return (q->is_output) ? 2113 - POLLOUT | POLLWRNORM : 2114 - POLLIN | POLLRDNORM; 2113 + EPOLLOUT | EPOLLWRNORM : 2114 + EPOLLIN | EPOLLRDNORM; 2115 2115 } 2116 2116 return 0; 2117 2117 }
+4 -4
drivers/media/common/videobuf2/videobuf2-v4l2.c
··· 658 658 == V4L2_BUF_FLAG_TIMESTAMP_COPY; 659 659 /* 660 660 * For compatibility with vb1: if QBUF hasn't been called yet, then 661 - * return POLLERR as well. This only affects capture queues, output 661 + * return EPOLLERR as well. This only affects capture queues, output 662 662 * queues will always initialize waiting_for_buffers to false. 663 663 */ 664 664 q->quirk_poll_must_check_waiting_for_buffers = true; ··· 683 683 struct v4l2_fh *fh = file->private_data; 684 684 685 685 if (v4l2_event_pending(fh)) 686 - res = POLLPRI; 687 - else if (req_events & POLLPRI) 686 + res = EPOLLPRI; 687 + else if (req_events & EPOLLPRI) 688 688 poll_wait(file, &fh->wait, wait); 689 689 } 690 690 ··· 921 921 WARN_ON(!lock); 922 922 923 923 if (lock && mutex_lock_interruptible(lock)) 924 - return POLLERR; 924 + return EPOLLERR; 925 925 926 926 fileio = q->fileio; 927 927
+7 -7
drivers/media/dvb-core/dmxdev.c
··· 1179 1179 __poll_t mask = 0; 1180 1180 1181 1181 if ((!dmxdevfilter) || dmxdevfilter->dev->exit) 1182 - return POLLERR; 1182 + return EPOLLERR; 1183 1183 if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) 1184 1184 return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait); 1185 1185 ··· 1191 1191 return 0; 1192 1192 1193 1193 if (dmxdevfilter->buffer.error) 1194 - mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR); 1194 + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); 1195 1195 1196 1196 if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer)) 1197 - mask |= (POLLIN | POLLRDNORM | POLLPRI); 1197 + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI); 1198 1198 1199 1199 return mask; 1200 1200 } ··· 1331 1331 dprintk("%s\n", __func__); 1332 1332 1333 1333 if (dmxdev->exit) 1334 - return POLLERR; 1334 + return EPOLLERR; 1335 1335 if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) 1336 1336 return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait); 1337 1337 ··· 1343 1343 #endif 1344 1344 if (need_ringbuffer) { 1345 1345 if (dmxdev->dvr_buffer.error) 1346 - mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR); 1346 + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); 1347 1347 1348 1348 if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer)) 1349 - mask |= (POLLIN | POLLRDNORM | POLLPRI); 1349 + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI); 1350 1350 } else 1351 - mask |= (POLLOUT | POLLWRNORM | POLLPRI); 1351 + mask |= (EPOLLOUT | EPOLLWRNORM | EPOLLPRI); 1352 1352 1353 1353 return mask; 1354 1354 }
+2 -2
drivers/media/dvb-core/dvb_ca_en50221.c
··· 1796 1796 dprintk("%s\n", __func__); 1797 1797 1798 1798 if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) 1799 - mask |= POLLIN; 1799 + mask |= EPOLLIN; 1800 1800 1801 1801 /* if there is something, return now */ 1802 1802 if (mask) ··· 1806 1806 poll_wait(file, &ca->wait_queue, wait); 1807 1807 1808 1808 if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) 1809 - mask |= POLLIN; 1809 + mask |= EPOLLIN; 1810 1810 1811 1811 return mask; 1812 1812 }
+1 -1
drivers/media/dvb-core/dvb_frontend.c
··· 2646 2646 poll_wait (file, &fepriv->events.wait_queue, wait); 2647 2647 2648 2648 if (fepriv->events.eventw != fepriv->events.eventr) 2649 - return (POLLIN | POLLRDNORM | POLLPRI); 2649 + return (EPOLLIN | EPOLLRDNORM | EPOLLPRI); 2650 2650 2651 2651 return 0; 2652 2652 }
+1 -1
drivers/media/firewire/firedtv-ci.c
··· 209 209 210 210 static __poll_t fdtv_ca_io_poll(struct file *file, poll_table *wait) 211 211 { 212 - return POLLIN; 212 + return EPOLLIN; 213 213 } 214 214 215 215 static const struct file_operations fdtv_ca_fops = {
+1 -1
drivers/media/i2c/saa6588.c
··· 413 413 case SAA6588_CMD_POLL: 414 414 a->result = 0; 415 415 if (s->data_available_for_read) 416 - a->result |= POLLIN | POLLRDNORM; 416 + a->result |= EPOLLIN | EPOLLRDNORM; 417 417 poll_wait(a->instance, &s->read_queue, a->event_list); 418 418 break; 419 419
+1 -1
drivers/media/media-devnode.c
··· 105 105 struct media_devnode *devnode = media_devnode_data(filp); 106 106 107 107 if (!media_devnode_is_registered(devnode)) 108 - return POLLERR | POLLHUP; 108 + return EPOLLERR | EPOLLHUP; 109 109 if (!devnode->fops->poll) 110 110 return DEFAULT_POLLMASK; 111 111 return devnode->fops->poll(filp, poll);
+11 -11
drivers/media/pci/bt8xx/bttv-driver.c
··· 2964 2964 __poll_t req_events = poll_requested_events(wait); 2965 2965 2966 2966 if (v4l2_event_pending(&fh->fh)) 2967 - rc = POLLPRI; 2968 - else if (req_events & POLLPRI) 2967 + rc = EPOLLPRI; 2968 + else if (req_events & EPOLLPRI) 2969 2969 poll_wait(file, &fh->fh.wait, wait); 2970 2970 2971 - if (!(req_events & (POLLIN | POLLRDNORM))) 2971 + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) 2972 2972 return rc; 2973 2973 2974 2974 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) { 2975 2975 if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI)) 2976 - return rc | POLLERR; 2976 + return rc | EPOLLERR; 2977 2977 return rc | videobuf_poll_stream(file, &fh->vbi, wait); 2978 2978 } 2979 2979 2980 2980 if (check_btres(fh,RESOURCE_VIDEO_STREAM)) { 2981 2981 /* streaming capture */ 2982 2982 if (list_empty(&fh->cap.stream)) 2983 - return rc | POLLERR; 2983 + return rc | EPOLLERR; 2984 2984 buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream); 2985 2985 } else { 2986 2986 /* read() capture */ 2987 2987 if (NULL == fh->cap.read_buf) { 2988 2988 /* need to capture a new frame */ 2989 2989 if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM)) 2990 - return rc | POLLERR; 2990 + return rc | EPOLLERR; 2991 2991 fh->cap.read_buf = videobuf_sg_alloc(fh->cap.msize); 2992 2992 if (NULL == fh->cap.read_buf) 2993 - return rc | POLLERR; 2993 + return rc | EPOLLERR; 2994 2994 fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR; 2995 2995 field = videobuf_next_field(&fh->cap); 2996 2996 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) { 2997 2997 kfree (fh->cap.read_buf); 2998 2998 fh->cap.read_buf = NULL; 2999 - return rc | POLLERR; 2999 + return rc | EPOLLERR; 3000 3000 } 3001 3001 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf); 3002 3002 fh->cap.read_off = 0; ··· 3007 3007 poll_wait(file, &buf->vb.done, wait); 3008 3008 if (buf->vb.state == VIDEOBUF_DONE || 3009 3009 buf->vb.state == VIDEOBUF_ERROR) 3010 - rc = rc | POLLIN|POLLRDNORM; 3010 + rc = rc | 
EPOLLIN|EPOLLRDNORM; 3011 3011 return rc; 3012 3012 } 3013 3013 ··· 3338 3338 __poll_t res = 0; 3339 3339 3340 3340 if (v4l2_event_pending(&fh->fh)) 3341 - res = POLLPRI; 3342 - else if (req_events & POLLPRI) 3341 + res = EPOLLPRI; 3342 + else if (req_events & EPOLLPRI) 3343 3343 poll_wait(file, &fh->fh.wait, wait); 3344 3344 radio_enable(btv); 3345 3345 cmd.instance = file;
+8 -8
drivers/media/pci/cx18/cx18-fileops.c
··· 613 613 614 614 /* Start a capture if there is none */ 615 615 if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags) && 616 - (req_events & (POLLIN | POLLRDNORM))) { 616 + (req_events & (EPOLLIN | EPOLLRDNORM))) { 617 617 int rc; 618 618 619 619 mutex_lock(&cx->serialize_lock); ··· 622 622 if (rc) { 623 623 CX18_DEBUG_INFO("Could not start capture for %s (%d)\n", 624 624 s->name, rc); 625 - return POLLERR; 625 + return EPOLLERR; 626 626 } 627 627 CX18_DEBUG_FILE("Encoder poll started capture\n"); 628 628 } ··· 632 632 __poll_t videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait); 633 633 634 634 if (v4l2_event_pending(&id->fh)) 635 - res |= POLLPRI; 636 - if (eof && videobuf_poll == POLLERR) 637 - return res | POLLHUP; 635 + res |= EPOLLPRI; 636 + if (eof && videobuf_poll == EPOLLERR) 637 + return res | EPOLLHUP; 638 638 return res | videobuf_poll; 639 639 } 640 640 641 641 /* add stream's waitq to the poll list */ 642 642 CX18_DEBUG_HI_FILE("Encoder poll\n"); 643 643 if (v4l2_event_pending(&id->fh)) 644 - res |= POLLPRI; 644 + res |= EPOLLPRI; 645 645 else 646 646 poll_wait(filp, &s->waitq, wait); 647 647 648 648 if (atomic_read(&s->q_full.depth)) 649 - return res | POLLIN | POLLRDNORM; 649 + return res | EPOLLIN | EPOLLRDNORM; 650 650 if (eof) 651 - return res | POLLHUP; 651 + return res | EPOLLHUP; 652 652 return res; 653 653 } 654 654
+2 -2
drivers/media/pci/ddbridge/ddbridge-core.c
··· 745 745 poll_wait(file, &input->dma->wq, wait); 746 746 poll_wait(file, &output->dma->wq, wait); 747 747 if (ddb_input_avail(input) >= 188) 748 - mask |= POLLIN | POLLRDNORM; 748 + mask |= EPOLLIN | EPOLLRDNORM; 749 749 if (ddb_output_free(output) >= 188) 750 - mask |= POLLOUT | POLLWRNORM; 750 + mask |= EPOLLOUT | EPOLLWRNORM; 751 751 return mask; 752 752 } 753 753
+8 -8
drivers/media/pci/ivtv/ivtv-fileops.c
··· 747 747 /* Turn off the old-style vsync events */ 748 748 clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); 749 749 if (v4l2_event_pending(&id->fh)) 750 - res = POLLPRI; 750 + res = EPOLLPRI; 751 751 } else { 752 752 /* This is the old-style API which is here only for backwards 753 753 compatibility. */ ··· 755 755 set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); 756 756 if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) || 757 757 test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags)) 758 - res = POLLPRI; 758 + res = EPOLLPRI; 759 759 } 760 760 761 761 /* Allow write if buffers are available for writing */ 762 762 if (s->q_free.buffers) 763 - res |= POLLOUT | POLLWRNORM; 763 + res |= EPOLLOUT | EPOLLWRNORM; 764 764 return res; 765 765 } 766 766 ··· 776 776 /* Start a capture if there is none */ 777 777 if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) && 778 778 s->type != IVTV_ENC_STREAM_TYPE_RAD && 779 - (req_events & (POLLIN | POLLRDNORM))) { 779 + (req_events & (EPOLLIN | EPOLLRDNORM))) { 780 780 int rc; 781 781 782 782 mutex_lock(&itv->serialize_lock); ··· 785 785 if (rc) { 786 786 IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n", 787 787 s->name, rc); 788 - return POLLERR; 788 + return EPOLLERR; 789 789 } 790 790 IVTV_DEBUG_FILE("Encoder poll started capture\n"); 791 791 } ··· 794 794 IVTV_DEBUG_HI_FILE("Encoder poll\n"); 795 795 poll_wait(filp, &s->waitq, wait); 796 796 if (v4l2_event_pending(&id->fh)) 797 - res |= POLLPRI; 797 + res |= EPOLLPRI; 798 798 else 799 799 poll_wait(filp, &id->fh.wait, wait); 800 800 801 801 if (s->q_full.length || s->q_io.length) 802 - return res | POLLIN | POLLRDNORM; 802 + return res | EPOLLIN | EPOLLRDNORM; 803 803 if (eof) 804 - return res | POLLHUP; 804 + return res | EPOLLHUP; 805 805 return res; 806 806 } 807 807
+1 -1
drivers/media/pci/meye/meye.c
··· 1430 1430 mutex_lock(&meye.lock); 1431 1431 poll_wait(file, &meye.proc_list, wait); 1432 1432 if (kfifo_len(&meye.doneq)) 1433 - res |= POLLIN | POLLRDNORM; 1433 + res |= EPOLLIN | EPOLLRDNORM; 1434 1434 mutex_unlock(&meye.lock); 1435 1435 return res; 1436 1436 }
+3 -3
drivers/media/pci/saa7164/saa7164-encoder.c
··· 925 925 saa7164_histogram_update(&port->poll_interval, 926 926 port->last_poll_msecs_diff); 927 927 928 - if (!(req_events & (POLLIN | POLLRDNORM))) 928 + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) 929 929 return mask; 930 930 931 931 if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) { 932 932 if (atomic_inc_return(&port->v4l_reader_count) == 1) { 933 933 if (saa7164_encoder_initialize(port) < 0) 934 - return mask | POLLERR; 934 + return mask | EPOLLERR; 935 935 saa7164_encoder_start_streaming(port); 936 936 msleep(200); 937 937 } ··· 939 939 940 940 /* Pull the first buffer from the used list */ 941 941 if (!list_empty(&port->list_buf_used.list)) 942 - mask |= POLLIN | POLLRDNORM; 942 + mask |= EPOLLIN | EPOLLRDNORM; 943 943 944 944 return mask; 945 945 }
+1 -1
drivers/media/pci/saa7164/saa7164-vbi.c
··· 650 650 651 651 /* Pull the first buffer from the used list */ 652 652 if (!list_empty(&port->list_buf_used.list)) 653 - mask |= POLLIN | POLLRDNORM; 653 + mask |= EPOLLIN | EPOLLRDNORM; 654 654 655 655 return mask; 656 656 }
+5 -5
drivers/media/pci/ttpci/av7110_av.c
··· 951 951 poll_wait(file, &av7110->video_events.wait_queue, wait); 952 952 953 953 if (av7110->video_events.eventw != av7110->video_events.eventr) 954 - mask = POLLPRI; 954 + mask = EPOLLPRI; 955 955 956 956 if ((file->f_flags & O_ACCMODE) != O_RDONLY) { 957 957 if (av7110->playing) { 958 958 if (FREE_COND) 959 - mask |= (POLLOUT | POLLWRNORM); 959 + mask |= (EPOLLOUT | EPOLLWRNORM); 960 960 } else { 961 961 /* if not playing: may play if asked for */ 962 - mask |= (POLLOUT | POLLWRNORM); 962 + mask |= (EPOLLOUT | EPOLLWRNORM); 963 963 } 964 964 } 965 965 ··· 1001 1001 1002 1002 if (av7110->playing) { 1003 1003 if (dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024) 1004 - mask |= (POLLOUT | POLLWRNORM); 1004 + mask |= (EPOLLOUT | EPOLLWRNORM); 1005 1005 } else /* if not playing: may play if asked for */ 1006 - mask = (POLLOUT | POLLWRNORM); 1006 + mask = (EPOLLOUT | EPOLLWRNORM); 1007 1007 1008 1008 return mask; 1009 1009 }
+2 -2
drivers/media/pci/ttpci/av7110_ca.c
··· 237 237 poll_wait(file, &wbuf->queue, wait); 238 238 239 239 if (!dvb_ringbuffer_empty(rbuf)) 240 - mask |= (POLLIN | POLLRDNORM); 240 + mask |= (EPOLLIN | EPOLLRDNORM); 241 241 242 242 if (dvb_ringbuffer_free(wbuf) > 1024) 243 - mask |= (POLLOUT | POLLWRNORM); 243 + mask |= (EPOLLOUT | EPOLLWRNORM); 244 244 245 245 return mask; 246 246 }
+8 -8
drivers/media/pci/zoran/zoran_driver.c
··· 2513 2513 2514 2514 /* we should check whether buffers are ready to be synced on 2515 2515 * (w/o waits - O_NONBLOCK) here 2516 - * if ready for read (sync), return POLLIN|POLLRDNORM, 2517 - * if ready for write (sync), return POLLOUT|POLLWRNORM, 2518 - * if error, return POLLERR, 2519 - * if no buffers queued or so, return POLLNVAL 2516 + * if ready for read (sync), return EPOLLIN|EPOLLRDNORM, 2517 + * if ready for write (sync), return EPOLLOUT|EPOLLWRNORM, 2518 + * if error, return EPOLLERR, 2519 + * if no buffers queued or so, return EPOLLNVAL 2520 2520 */ 2521 2521 2522 2522 switch (fh->map_mode) { ··· 2536 2536 if (fh->buffers.active != ZORAN_FREE && 2537 2537 /* Buffer ready to DQBUF? */ 2538 2538 zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE) 2539 - res |= POLLIN | POLLRDNORM; 2539 + res |= EPOLLIN | EPOLLRDNORM; 2540 2540 spin_unlock_irqrestore(&zr->spinlock, flags); 2541 2541 2542 2542 break; ··· 2557 2557 if (fh->buffers.active != ZORAN_FREE && 2558 2558 zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) { 2559 2559 if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) 2560 - res |= POLLIN | POLLRDNORM; 2560 + res |= EPOLLIN | EPOLLRDNORM; 2561 2561 else 2562 - res |= POLLOUT | POLLWRNORM; 2562 + res |= EPOLLOUT | EPOLLWRNORM; 2563 2563 } 2564 2564 spin_unlock_irqrestore(&zr->spinlock, flags); 2565 2565 ··· 2570 2570 KERN_ERR 2571 2571 "%s: %s - internal error, unknown map_mode=%d\n", 2572 2572 ZR_DEVNAME(zr), __func__, fh->map_mode); 2573 - res |= POLLERR; 2573 + res |= EPOLLERR; 2574 2574 } 2575 2575 2576 2576 return res;
+2 -2
drivers/media/platform/fsl-viu.c
··· 1272 1272 __poll_t res = v4l2_ctrl_poll(file, wait); 1273 1273 1274 1274 if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type) 1275 - return POLLERR; 1275 + return EPOLLERR; 1276 1276 1277 - if (!(req_events & (POLLIN | POLLRDNORM))) 1277 + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) 1278 1278 return res; 1279 1279 1280 1280 mutex_lock(&dev->lock);
+4 -4
drivers/media/platform/s5p-mfc/s5p_mfc.c
··· 1008 1008 */ 1009 1009 if ((!src_q->streaming || list_empty(&src_q->queued_list)) 1010 1010 && (!dst_q->streaming || list_empty(&dst_q->queued_list))) { 1011 - rc = POLLERR; 1011 + rc = EPOLLERR; 1012 1012 goto end; 1013 1013 } 1014 1014 mutex_unlock(&dev->mfc_mutex); ··· 1017 1017 poll_wait(file, &dst_q->done_wq, wait); 1018 1018 mutex_lock(&dev->mfc_mutex); 1019 1019 if (v4l2_event_pending(&ctx->fh)) 1020 - rc |= POLLPRI; 1020 + rc |= EPOLLPRI; 1021 1021 spin_lock_irqsave(&src_q->done_lock, flags); 1022 1022 if (!list_empty(&src_q->done_list)) 1023 1023 src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer, 1024 1024 done_entry); 1025 1025 if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE 1026 1026 || src_vb->state == VB2_BUF_STATE_ERROR)) 1027 - rc |= POLLOUT | POLLWRNORM; 1027 + rc |= EPOLLOUT | EPOLLWRNORM; 1028 1028 spin_unlock_irqrestore(&src_q->done_lock, flags); 1029 1029 spin_lock_irqsave(&dst_q->done_lock, flags); 1030 1030 if (!list_empty(&dst_q->done_list)) ··· 1032 1032 done_entry); 1033 1033 if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE 1034 1034 || dst_vb->state == VB2_BUF_STATE_ERROR)) 1035 - rc |= POLLIN | POLLRDNORM; 1035 + rc |= EPOLLIN | EPOLLRDNORM; 1036 1036 spin_unlock_irqrestore(&dst_q->done_lock, flags); 1037 1037 end: 1038 1038 mutex_unlock(&dev->mfc_mutex);
+2 -2
drivers/media/platform/soc_camera/soc_camera.c
··· 809 809 { 810 810 struct soc_camera_device *icd = file->private_data; 811 811 struct soc_camera_host *ici = to_soc_camera_host(icd->parent); 812 - __poll_t res = POLLERR; 812 + __poll_t res = EPOLLERR; 813 813 814 814 if (icd->streamer != file) 815 - return POLLERR; 815 + return EPOLLERR; 816 816 817 817 mutex_lock(&ici->host_lock); 818 818 res = ici->ops->poll(file, pt);
+1 -1
drivers/media/platform/vivid/vivid-radio-rx.c
··· 142 142 143 143 __poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait) 144 144 { 145 - return POLLIN | POLLRDNORM | v4l2_ctrl_poll(file, wait); 145 + return EPOLLIN | EPOLLRDNORM | v4l2_ctrl_poll(file, wait); 146 146 } 147 147 148 148 int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+1 -1
drivers/media/platform/vivid/vivid-radio-tx.c
··· 105 105 106 106 __poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait) 107 107 { 108 - return POLLOUT | POLLWRNORM | v4l2_ctrl_poll(file, wait); 108 + return EPOLLOUT | EPOLLWRNORM | v4l2_ctrl_poll(file, wait); 109 109 } 110 110 111 111 int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
+2 -2
drivers/media/radio/radio-cadet.c
··· 488 488 __poll_t res = v4l2_ctrl_poll(file, wait); 489 489 490 490 poll_wait(file, &dev->read_queue, wait); 491 - if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) { 491 + if (dev->rdsstat == 0 && (req_events & (EPOLLIN | EPOLLRDNORM))) { 492 492 mutex_lock(&dev->lock); 493 493 if (dev->rdsstat == 0) 494 494 cadet_start_rds(dev); 495 495 mutex_unlock(&dev->lock); 496 496 } 497 497 if (cadet_has_rds_data(dev)) 498 - res |= POLLIN | POLLRDNORM; 498 + res |= EPOLLIN | EPOLLRDNORM; 499 499 return res; 500 500 } 501 501
+3 -3
drivers/media/radio/radio-si476x.c
··· 1158 1158 __poll_t req_events = poll_requested_events(pts); 1159 1159 __poll_t err = v4l2_ctrl_poll(file, pts); 1160 1160 1161 - if (req_events & (POLLIN | POLLRDNORM)) { 1161 + if (req_events & (EPOLLIN | EPOLLRDNORM)) { 1162 1162 if (atomic_read(&radio->core->is_alive)) 1163 1163 poll_wait(file, &radio->core->rds_read_queue, pts); 1164 1164 1165 1165 if (!atomic_read(&radio->core->is_alive)) 1166 - err = POLLHUP; 1166 + err = EPOLLHUP; 1167 1167 1168 1168 if (!kfifo_is_empty(&radio->core->rds_fifo)) 1169 - err = POLLIN | POLLRDNORM; 1169 + err = EPOLLIN | EPOLLRDNORM; 1170 1170 } 1171 1171 1172 1172 return err;
+2 -2
drivers/media/radio/radio-wl1273.c
··· 1104 1104 poll_wait(file, &radio->read_queue, pts); 1105 1105 1106 1106 if (radio->rd_index != radio->wr_index) 1107 - return POLLIN | POLLRDNORM; 1107 + return EPOLLIN | EPOLLRDNORM; 1108 1108 1109 1109 } else if (core->mode == WL1273_MODE_TX) { 1110 - return POLLOUT | POLLWRNORM; 1110 + return EPOLLOUT | EPOLLWRNORM; 1111 1111 } 1112 1112 1113 1113 return 0;
+2 -2
drivers/media/radio/si470x/radio-si470x-common.c
··· 514 514 __poll_t req_events = poll_requested_events(pts); 515 515 __poll_t retval = v4l2_ctrl_poll(file, pts); 516 516 517 - if (req_events & (POLLIN | POLLRDNORM)) { 517 + if (req_events & (EPOLLIN | EPOLLRDNORM)) { 518 518 /* switch on rds reception */ 519 519 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) 520 520 si470x_rds_on(radio); ··· 522 522 poll_wait(file, &radio->read_queue, pts); 523 523 524 524 if (radio->rd_index != radio->wr_index) 525 - retval |= POLLIN | POLLRDNORM; 525 + retval |= EPOLLIN | EPOLLRDNORM; 526 526 } 527 527 528 528 return retval;
+1 -1
drivers/media/radio/wl128x/fmdrv_v4l2.c
··· 112 112 ret = fmc_is_rds_data_available(fmdev, file, pts); 113 113 mutex_unlock(&fmdev->mutex); 114 114 if (ret < 0) 115 - return POLLIN | POLLRDNORM; 115 + return EPOLLIN | EPOLLRDNORM; 116 116 117 117 return 0; 118 118 }
+6 -6
drivers/media/rc/lirc_dev.c
··· 109 109 if (LIRC_IS_TIMEOUT(sample) && !fh->send_timeout_reports) 110 110 continue; 111 111 if (kfifo_put(&fh->rawir, sample)) 112 - wake_up_poll(&fh->wait_poll, POLLIN | POLLRDNORM); 112 + wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM); 113 113 } 114 114 spin_unlock_irqrestore(&dev->lirc_fh_lock, flags); 115 115 } ··· 130 130 spin_lock_irqsave(&dev->lirc_fh_lock, flags); 131 131 list_for_each_entry(fh, &dev->lirc_fh, list) { 132 132 if (kfifo_put(&fh->scancodes, *lsc)) 133 - wake_up_poll(&fh->wait_poll, POLLIN | POLLRDNORM); 133 + wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM); 134 134 } 135 135 spin_unlock_irqrestore(&dev->lirc_fh_lock, flags); 136 136 } ··· 603 603 poll_wait(file, &fh->wait_poll, wait); 604 604 605 605 if (!rcdev->registered) { 606 - events = POLLHUP | POLLERR; 606 + events = EPOLLHUP | EPOLLERR; 607 607 } else if (rcdev->driver_type != RC_DRIVER_IR_RAW_TX) { 608 608 if (fh->rec_mode == LIRC_MODE_SCANCODE && 609 609 !kfifo_is_empty(&fh->scancodes)) 610 - events = POLLIN | POLLRDNORM; 610 + events = EPOLLIN | EPOLLRDNORM; 611 611 612 612 if (fh->rec_mode == LIRC_MODE_MODE2 && 613 613 !kfifo_is_empty(&fh->rawir)) 614 - events = POLLIN | POLLRDNORM; 614 + events = EPOLLIN | EPOLLRDNORM; 615 615 } 616 616 617 617 return events; ··· 779 779 780 780 spin_lock_irqsave(&dev->lirc_fh_lock, flags); 781 781 list_for_each_entry(fh, &dev->lirc_fh, list) 782 - wake_up_poll(&fh->wait_poll, POLLHUP | POLLERR); 782 + wake_up_poll(&fh->wait_poll, EPOLLHUP | EPOLLERR); 783 783 spin_unlock_irqrestore(&dev->lirc_fh_lock, flags); 784 784 785 785 cdev_device_del(&dev->lirc_cdev, &dev->lirc_dev);
+2 -2
drivers/media/usb/cpia2/cpia2_core.c
··· 2375 2375 { 2376 2376 __poll_t status = v4l2_ctrl_poll(filp, wait); 2377 2377 2378 - if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) && 2378 + if ((poll_requested_events(wait) & (EPOLLIN | EPOLLRDNORM)) && 2379 2379 !cam->streaming) { 2380 2380 /* Start streaming */ 2381 2381 cpia2_usb_stream_start(cam, ··· 2385 2385 poll_wait(filp, &cam->wq_stream, wait); 2386 2386 2387 2387 if (cam->curbuff->status == FRAME_READY) 2388 - status |= POLLIN | POLLRDNORM; 2388 + status |= EPOLLIN | EPOLLRDNORM; 2389 2389 2390 2390 return status; 2391 2391 }
+2 -2
drivers/media/usb/cx231xx/cx231xx-417.c
··· 1821 1821 __poll_t res = 0; 1822 1822 1823 1823 if (v4l2_event_pending(&fh->fh)) 1824 - res |= POLLPRI; 1824 + res |= EPOLLPRI; 1825 1825 else 1826 1826 poll_wait(file, &fh->fh.wait, wait); 1827 1827 1828 - if (!(req_events & (POLLIN | POLLRDNORM))) 1828 + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) 1829 1829 return res; 1830 1830 1831 1831 mutex_lock(&dev->lock);
+5 -5
drivers/media/usb/cx231xx/cx231xx-video.c
··· 2018 2018 2019 2019 rc = check_dev(dev); 2020 2020 if (rc < 0) 2021 - return POLLERR; 2021 + return EPOLLERR; 2022 2022 2023 2023 rc = res_get(fh); 2024 2024 2025 2025 if (unlikely(rc < 0)) 2026 - return POLLERR; 2026 + return EPOLLERR; 2027 2027 2028 2028 if (v4l2_event_pending(&fh->fh)) 2029 - res |= POLLPRI; 2029 + res |= EPOLLPRI; 2030 2030 else 2031 2031 poll_wait(filp, &fh->fh.wait, wait); 2032 2032 2033 - if (!(req_events & (POLLIN | POLLRDNORM))) 2033 + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) 2034 2034 return res; 2035 2035 2036 2036 if ((V4L2_BUF_TYPE_VIDEO_CAPTURE == fh->type) || ··· 2040 2040 mutex_unlock(&dev->lock); 2041 2041 return res; 2042 2042 } 2043 - return res | POLLERR; 2043 + return res | EPOLLERR; 2044 2044 } 2045 2045 2046 2046 /*
+6 -6
drivers/media/usb/gspca/gspca.c
··· 1877 1877 1878 1878 gspca_dbg(gspca_dev, D_FRAM, "poll\n"); 1879 1879 1880 - if (req_events & POLLPRI) 1880 + if (req_events & EPOLLPRI) 1881 1881 ret |= v4l2_ctrl_poll(file, wait); 1882 1882 1883 - if (req_events & (POLLIN | POLLRDNORM)) { 1883 + if (req_events & (EPOLLIN | EPOLLRDNORM)) { 1884 1884 /* if reqbufs is not done, the user would use read() */ 1885 1885 if (gspca_dev->memory == GSPCA_MEMORY_NO) { 1886 1886 if (read_alloc(gspca_dev, file) != 0) { 1887 - ret |= POLLERR; 1887 + ret |= EPOLLERR; 1888 1888 goto out; 1889 1889 } 1890 1890 } ··· 1893 1893 1894 1894 /* check if an image has been received */ 1895 1895 if (mutex_lock_interruptible(&gspca_dev->queue_lock) != 0) { 1896 - ret |= POLLERR; 1896 + ret |= EPOLLERR; 1897 1897 goto out; 1898 1898 } 1899 1899 if (gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i)) 1900 - ret |= POLLIN | POLLRDNORM; 1900 + ret |= EPOLLIN | EPOLLRDNORM; 1901 1901 mutex_unlock(&gspca_dev->queue_lock); 1902 1902 } 1903 1903 1904 1904 out: 1905 1905 if (!gspca_dev->present) 1906 - ret |= POLLHUP; 1906 + ret |= EPOLLHUP; 1907 1907 1908 1908 return ret; 1909 1909 }
+2 -2
drivers/media/usb/hdpvr/hdpvr-video.c
··· 528 528 struct hdpvr_device *dev = video_drvdata(filp); 529 529 __poll_t mask = v4l2_ctrl_poll(filp, wait); 530 530 531 - if (!(req_events & (POLLIN | POLLRDNORM))) 531 + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) 532 532 return mask; 533 533 534 534 mutex_lock(&dev->io_mutex); ··· 553 553 buf = hdpvr_get_next_buffer(dev); 554 554 } 555 555 if (buf && buf->status == BUFSTAT_READY) 556 - mask |= POLLIN | POLLRDNORM; 556 + mask |= EPOLLIN | EPOLLRDNORM; 557 557 558 558 return mask; 559 559 }
+3 -3
drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
··· 1181 1181 int ret; 1182 1182 1183 1183 if (fh->fw_mode_flag) { 1184 - mask |= POLLIN | POLLRDNORM; 1184 + mask |= EPOLLIN | EPOLLRDNORM; 1185 1185 return mask; 1186 1186 } 1187 1187 1188 1188 if (!fh->rhp) { 1189 1189 ret = pvr2_v4l2_iosetup(fh); 1190 - if (ret) return POLLERR; 1190 + if (ret) return EPOLLERR; 1191 1191 } 1192 1192 1193 1193 poll_wait(file,&fh->wait_data,wait); 1194 1194 1195 1195 if (pvr2_ioread_avail(fh->rhp) >= 0) { 1196 - mask |= POLLIN | POLLRDNORM; 1196 + mask |= EPOLLIN | EPOLLRDNORM; 1197 1197 } 1198 1198 1199 1199 return mask;
+2 -2
drivers/media/usb/stkwebcam/stk-webcam.c
··· 729 729 poll_wait(fp, &dev->wait_frame, wait); 730 730 731 731 if (!is_present(dev)) 732 - return POLLERR; 732 + return EPOLLERR; 733 733 734 734 if (!list_empty(&dev->sio_full)) 735 - return res | POLLIN | POLLRDNORM; 735 + return res | EPOLLIN | EPOLLRDNORM; 736 736 737 737 return res; 738 738 }
+7 -7
drivers/media/usb/tm6000/tm6000-video.c
··· 1424 1424 __poll_t res = 0; 1425 1425 1426 1426 if (v4l2_event_pending(&fh->fh)) 1427 - res = POLLPRI; 1428 - else if (req_events & POLLPRI) 1427 + res = EPOLLPRI; 1428 + else if (req_events & EPOLLPRI) 1429 1429 poll_wait(file, &fh->fh.wait, wait); 1430 1430 if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type) 1431 - return res | POLLERR; 1431 + return res | EPOLLERR; 1432 1432 1433 1433 if (!!is_res_streaming(fh->dev, fh)) 1434 - return res | POLLERR; 1434 + return res | EPOLLERR; 1435 1435 1436 1436 if (!is_res_read(fh->dev, fh)) { 1437 1437 /* streaming capture */ 1438 1438 if (list_empty(&fh->vb_vidq.stream)) 1439 - return res | POLLERR; 1439 + return res | EPOLLERR; 1440 1440 buf = list_entry(fh->vb_vidq.stream.next, struct tm6000_buffer, vb.stream); 1441 1441 poll_wait(file, &buf->vb.done, wait); 1442 1442 if (buf->vb.state == VIDEOBUF_DONE || 1443 1443 buf->vb.state == VIDEOBUF_ERROR) 1444 - return res | POLLIN | POLLRDNORM; 1445 - } else if (req_events & (POLLIN | POLLRDNORM)) { 1444 + return res | EPOLLIN | EPOLLRDNORM; 1445 + } else if (req_events & (EPOLLIN | EPOLLRDNORM)) { 1446 1446 /* read() capture */ 1447 1447 return res | videobuf_poll_stream(file, &fh->vb_vidq, wait); 1448 1448 }
+1 -1
drivers/media/v4l2-core/v4l2-ctrls.c
··· 3462 3462 struct v4l2_fh *fh = file->private_data; 3463 3463 3464 3464 if (v4l2_event_pending(fh)) 3465 - return POLLPRI; 3465 + return EPOLLPRI; 3466 3466 poll_wait(file, &fh->wait, wait); 3467 3467 return 0; 3468 3468 }
+1 -1
drivers/media/v4l2-core/v4l2-dev.c
··· 334 334 static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll) 335 335 { 336 336 struct video_device *vdev = video_devdata(filp); 337 - __poll_t res = POLLERR | POLLHUP; 337 + __poll_t res = EPOLLERR | EPOLLHUP; 338 338 339 339 if (!vdev->fops->poll) 340 340 return DEFAULT_POLLMASK;
+7 -7
drivers/media/v4l2-core/v4l2-mem2mem.c
··· 514 514 struct v4l2_fh *fh = file->private_data; 515 515 516 516 if (v4l2_event_pending(fh)) 517 - rc = POLLPRI; 518 - else if (req_events & POLLPRI) 517 + rc = EPOLLPRI; 518 + else if (req_events & EPOLLPRI) 519 519 poll_wait(file, &fh->wait, wait); 520 - if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM))) 520 + if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))) 521 521 return rc; 522 522 } 523 523 ··· 531 531 */ 532 532 if ((!src_q->streaming || list_empty(&src_q->queued_list)) 533 533 && (!dst_q->streaming || list_empty(&dst_q->queued_list))) { 534 - rc |= POLLERR; 534 + rc |= EPOLLERR; 535 535 goto end; 536 536 } 537 537 ··· 548 548 */ 549 549 if (dst_q->last_buffer_dequeued) { 550 550 spin_unlock_irqrestore(&dst_q->done_lock, flags); 551 - return rc | POLLIN | POLLRDNORM; 551 + return rc | EPOLLIN | EPOLLRDNORM; 552 552 } 553 553 554 554 poll_wait(file, &dst_q->done_wq, wait); ··· 561 561 done_entry); 562 562 if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE 563 563 || src_vb->state == VB2_BUF_STATE_ERROR)) 564 - rc |= POLLOUT | POLLWRNORM; 564 + rc |= EPOLLOUT | EPOLLWRNORM; 565 565 spin_unlock_irqrestore(&src_q->done_lock, flags); 566 566 567 567 spin_lock_irqsave(&dst_q->done_lock, flags); ··· 570 570 done_entry); 571 571 if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE 572 572 || dst_vb->state == VB2_BUF_STATE_ERROR)) 573 - rc |= POLLIN | POLLRDNORM; 573 + rc |= EPOLLIN | EPOLLRDNORM; 574 574 spin_unlock_irqrestore(&dst_q->done_lock, flags); 575 575 576 576 end:
+2 -2
drivers/media/v4l2-core/v4l2-subdev.c
··· 476 476 struct v4l2_fh *fh = file->private_data; 477 477 478 478 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) 479 - return POLLERR; 479 + return EPOLLERR; 480 480 481 481 poll_wait(file, &fh->wait, wait); 482 482 483 483 if (v4l2_event_pending(fh)) 484 - return POLLPRI; 484 + return EPOLLPRI; 485 485 486 486 return 0; 487 487 }
+5 -5
drivers/media/v4l2-core/videobuf-core.c
··· 1131 1131 if (!list_empty(&q->stream)) 1132 1132 buf = list_entry(q->stream.next, 1133 1133 struct videobuf_buffer, stream); 1134 - } else if (req_events & (POLLIN | POLLRDNORM)) { 1134 + } else if (req_events & (EPOLLIN | EPOLLRDNORM)) { 1135 1135 if (!q->reading) 1136 1136 __videobuf_read_start(q); 1137 1137 if (!q->reading) { 1138 - rc = POLLERR; 1138 + rc = EPOLLERR; 1139 1139 } else if (NULL == q->read_buf) { 1140 1140 q->read_buf = list_entry(q->stream.next, 1141 1141 struct videobuf_buffer, ··· 1146 1146 buf = q->read_buf; 1147 1147 } 1148 1148 if (!buf) 1149 - rc = POLLERR; 1149 + rc = EPOLLERR; 1150 1150 1151 1151 if (0 == rc) { 1152 1152 poll_wait(file, &buf->done, wait); ··· 1157 1157 case V4L2_BUF_TYPE_VBI_OUTPUT: 1158 1158 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: 1159 1159 case V4L2_BUF_TYPE_SDR_OUTPUT: 1160 - rc = POLLOUT | POLLWRNORM; 1160 + rc = EPOLLOUT | EPOLLWRNORM; 1161 1161 break; 1162 1162 default: 1163 - rc = POLLIN | POLLRDNORM; 1163 + rc = EPOLLIN | EPOLLRDNORM; 1164 1164 break; 1165 1165 } 1166 1166 }
+1 -1
drivers/mfd/ab8500-debugfs.c
··· 1267 1267 if (irq_abb < num_irqs) 1268 1268 irq_count[irq_abb]++; 1269 1269 /* 1270 - * This makes it possible to use poll for events (POLLPRI | POLLERR) 1270 + * This makes it possible to use poll for events (EPOLLPRI | EPOLLERR) 1271 1271 * from userspace on sysfs file named <irq-nr> 1272 1272 */ 1273 1273 sprintf(buf, "%d", irq);
+2 -2
drivers/misc/cxl/file.c
··· 378 378 379 379 spin_lock_irqsave(&ctx->lock, flags); 380 380 if (ctx_event_pending(ctx)) 381 - mask |= POLLIN | POLLRDNORM; 381 + mask |= EPOLLIN | EPOLLRDNORM; 382 382 else if (ctx->status == CLOSED) 383 383 /* Only error on closed when there are no futher events pending 384 384 */ 385 - mask |= POLLERR; 385 + mask |= EPOLLERR; 386 386 spin_unlock_irqrestore(&ctx->lock, flags); 387 387 388 388 pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
+2 -2
drivers/misc/hpilo.c
··· 519 519 poll_wait(fp, &data->ccb_waitq, wait); 520 520 521 521 if (is_channel_reset(driver_ccb)) 522 - return POLLERR; 522 + return EPOLLERR; 523 523 else if (ilo_pkt_recv(data->ilo_hw, driver_ccb)) 524 - return POLLIN | POLLRDNORM; 524 + return EPOLLIN | EPOLLRDNORM; 525 525 526 526 return 0; 527 527 }
+1 -1
drivers/misc/lis3lv02d/lis3lv02d.c
··· 658 658 659 659 poll_wait(file, &lis3->misc_wait, wait); 660 660 if (atomic_read(&lis3->count)) 661 - return POLLIN | POLLRDNORM; 661 + return EPOLLIN | EPOLLRDNORM; 662 662 return 0; 663 663 } 664 664
+6 -6
drivers/misc/mei/main.c
··· 551 551 bool notify_en; 552 552 553 553 if (WARN_ON(!cl || !cl->dev)) 554 - return POLLERR; 554 + return EPOLLERR; 555 555 556 556 dev = cl->dev; 557 557 558 558 mutex_lock(&dev->device_lock); 559 559 560 - notify_en = cl->notify_en && (req_events & POLLPRI); 560 + notify_en = cl->notify_en && (req_events & EPOLLPRI); 561 561 562 562 if (dev->dev_state != MEI_DEV_ENABLED || 563 563 !mei_cl_is_connected(cl)) { 564 - mask = POLLERR; 564 + mask = EPOLLERR; 565 565 goto out; 566 566 } 567 567 568 568 if (notify_en) { 569 569 poll_wait(file, &cl->ev_wait, wait); 570 570 if (cl->notify_ev) 571 - mask |= POLLPRI; 571 + mask |= EPOLLPRI; 572 572 } 573 573 574 - if (req_events & (POLLIN | POLLRDNORM)) { 574 + if (req_events & (EPOLLIN | EPOLLRDNORM)) { 575 575 poll_wait(file, &cl->rx_wait, wait); 576 576 577 577 if (!list_empty(&cl->rd_completed)) 578 - mask |= POLLIN | POLLRDNORM; 578 + mask |= EPOLLIN | EPOLLRDNORM; 579 579 else 580 580 mei_cl_read_start(cl, mei_cl_mtu(cl), file); 581 581 }
+8 -8
drivers/misc/mic/cosm/cosm_scif_server.c
··· 55 55 * message being sent to host SCIF. SCIF_DISCNCT message processing on the 56 56 * host SCIF sets the host COSM SCIF endpoint state to DISCONNECTED and wakes 57 57 * up the host COSM thread blocked in scif_poll(..) resulting in 58 - * scif_poll(..) returning POLLHUP. 58 + * scif_poll(..) returning EPOLLHUP. 59 59 * 5. On the card, scif_peer_release_dev is next called which results in an 60 60 * SCIF_EXIT message being sent to the host and after receiving the 61 61 * SCIF_EXIT_ACK from the host the peer device teardown on the card is ··· 79 79 * processing. This results in the COSM endpoint on the card being closed and 80 80 * the SCIF host peer device on the card getting unregistered similar to 81 81 * steps 3, 4 and 5 for the card shutdown case above. scif_poll(..) on the 82 - * host returns POLLHUP as a result. 82 + * host returns EPOLLHUP as a result. 83 83 * 4. On the host, card peer device unregister and SCIF HW remove(..) also 84 84 * subsequently complete. 85 85 * ··· 87 87 * ---------- 88 88 * If a reset is issued after the card has crashed, there is no SCIF_DISCNT 89 89 * message from the card which would result in scif_poll(..) returning 90 - * POLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE 90 + * EPOLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE 91 91 * message to itself resulting in the card SCIF peer device being unregistered, 92 92 * this results in a scif_peer_release_dev -> scif_cleanup_scifdev-> 93 93 * scif_invalidate_ep call sequence which sets the endpoint state to 94 - * DISCONNECTED and results in scif_poll(..) returning POLLHUP. 94 + * DISCONNECTED and results in scif_poll(..) returning EPOLLHUP. 95 95 */ 96 96 97 97 #define COSM_SCIF_BACKLOG 16 ··· 190 190 191 191 /* 192 192 * Close this cosm_device's endpoint after its peer endpoint on the card has 193 - * been closed. In all cases except MIC card crash POLLHUP on the host is 193 + * been closed. In all cases except MIC card crash EPOLLHUP on the host is 194 194 * triggered by the client's endpoint being closed. 195 195 */ 196 196 static void cosm_scif_close(struct cosm_device *cdev) ··· 252 252 253 253 while (1) { 254 254 pollepd.epd = cdev->epd; 255 - pollepd.events = POLLIN; 255 + pollepd.events = EPOLLIN; 256 256 257 257 /* Drop the mutex before blocking in scif_poll(..) */ 258 258 mutex_unlock(&cdev->cosm_mutex); ··· 266 266 } 267 267 268 268 /* There is a message from the card */ 269 - if (pollepd.revents & POLLIN) 269 + if (pollepd.revents & EPOLLIN) 270 270 cosm_scif_recv(cdev); 271 271 272 272 /* The peer endpoint is closed or this endpoint disconnected */ 273 - if (pollepd.revents & POLLHUP) { 273 + if (pollepd.revents & EPOLLHUP) { 274 274 cosm_scif_close(cdev); 275 275 break; 276 276 }
+2 -2
drivers/misc/mic/cosm_client/cosm_scif_client.c
··· 160 160 161 161 while (!kthread_should_stop()) { 162 162 pollepd.epd = client_epd; 163 - pollepd.events = POLLIN; 163 + pollepd.events = EPOLLIN; 164 164 165 165 rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_SEND_MSEC); 166 166 if (rc < 0) { ··· 171 171 continue; 172 172 } 173 173 174 - if (pollepd.revents & POLLIN) 174 + if (pollepd.revents & EPOLLIN) 175 175 cosm_client_recv(); 176 176 177 177 msg.id = COSM_MSG_HEARTBEAT;
+12 -12
drivers/misc/mic/scif/scif_api.c
··· 1328 1328 if (ep->state == SCIFEP_CONNECTED || 1329 1329 ep->state == SCIFEP_DISCONNECTED || 1330 1330 ep->conn_err) 1331 - mask |= POLLOUT; 1331 + mask |= EPOLLOUT; 1332 1332 goto exit; 1333 1333 } 1334 1334 } ··· 1338 1338 _scif_poll_wait(f, &ep->conwq, wait, ep); 1339 1339 if (ep->state == SCIFEP_LISTENING) { 1340 1340 if (ep->conreqcnt) 1341 - mask |= POLLIN; 1341 + mask |= EPOLLIN; 1342 1342 goto exit; 1343 1343 } 1344 1344 } 1345 1345 1346 1346 /* Endpoint is connected or disconnected */ 1347 1347 if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) { 1348 - if (poll_requested_events(wait) & POLLIN) 1348 + if (poll_requested_events(wait) & EPOLLIN) 1349 1349 _scif_poll_wait(f, &ep->recvwq, wait, ep); 1350 - if (poll_requested_events(wait) & POLLOUT) 1350 + if (poll_requested_events(wait) & EPOLLOUT) 1351 1351 _scif_poll_wait(f, &ep->sendwq, wait, ep); 1352 1352 if (ep->state == SCIFEP_CONNECTED || 1353 1353 ep->state == SCIFEP_DISCONNECTED) { 1354 1354 /* Data can be read without blocking */ 1355 1355 if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1)) 1356 - mask |= POLLIN; 1356 + mask |= EPOLLIN; 1357 1357 /* Data can be written without blocking */ 1358 1358 if (scif_rb_space(&ep->qp_info.qp->outbound_q)) 1359 - mask |= POLLOUT; 1360 - /* Return POLLHUP if endpoint is disconnected */ 1359 + mask |= EPOLLOUT; 1360 + /* Return EPOLLHUP if endpoint is disconnected */ 1361 1361 if (ep->state == SCIFEP_DISCONNECTED) 1362 - mask |= POLLHUP; 1362 + mask |= EPOLLHUP; 1363 1363 goto exit; 1364 1364 } 1365 1365 } 1366 1366 1367 - /* Return POLLERR if the endpoint is in none of the above states */ 1368 - mask |= POLLERR; 1367 + /* Return EPOLLERR if the endpoint is in none of the above states */ 1368 + mask |= EPOLLERR; 1369 1369 exit: 1370 1370 spin_unlock(&ep->lock); 1371 1371 return mask; ··· 1398 1398 pt = &table.pt; 1399 1399 while (1) { 1400 1400 for (i = 0; i < nfds; i++) { 1401 - pt->_key = ufds[i].events | POLLERR | POLLHUP; 1401 + pt->_key = ufds[i].events | EPOLLERR | EPOLLHUP; 1402 1402 mask = __scif_pollfd(ufds[i].epd->anon, 1403 1403 pt, ufds[i].epd); 1404 - mask &= ufds[i].events | POLLERR | POLLHUP; 1404 + mask &= ufds[i].events | EPOLLERR | EPOLLHUP; 1405 1405 if (mask) { 1406 1406 count++; 1407 1407 pt->_qproc = NULL;
+4 -4
drivers/misc/mic/vop/vop_vringh.c
··· 1010 1010 } 1011 1011 1012 1012 /* 1013 - * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and 1013 + * We return EPOLLIN | EPOLLOUT from poll when new buffers are enqueued, and 1014 1014 * not when previously enqueued buffers may be available. This means that 1015 1015 * in the card->host (TX) path, when userspace is unblocked by poll it 1016 1016 * must drain all available descriptors or it can stall. ··· 1022 1022 1023 1023 mutex_lock(&vdev->vdev_mutex); 1024 1024 if (vop_vdev_inited(vdev)) { 1025 - mask = POLLERR; 1025 + mask = EPOLLERR; 1026 1026 goto done; 1027 1027 } 1028 1028 poll_wait(f, &vdev->waitq, wait); 1029 1029 if (vop_vdev_inited(vdev)) { 1030 - mask = POLLERR; 1030 + mask = EPOLLERR; 1031 1031 } else if (vdev->poll_wake) { 1032 1032 vdev->poll_wake = 0; 1033 - mask = POLLIN | POLLOUT; 1033 + mask = EPOLLIN | EPOLLOUT; 1034 1034 } 1035 1035 done: 1036 1036 mutex_unlock(&vdev->vdev_mutex);
+2 -2
drivers/misc/ocxl/file.c
··· 215 215 mutex_unlock(&ctx->status_mutex); 216 216 217 217 if (afu_events_pending(ctx)) 218 - mask = POLLIN | POLLRDNORM; 218 + mask = EPOLLIN | EPOLLRDNORM; 219 219 else if (closed) 220 - mask = POLLERR; 220 + mask = EPOLLERR; 221 221 222 222 return mask; 223 223 }
+2 -2
drivers/misc/phantom.c
··· 265 265 poll_wait(file, &dev->wait, wait); 266 266 267 267 if (!(dev->status & PHB_RUNNING)) 268 - mask = POLLERR; 268 + mask = EPOLLERR; 269 269 else if (atomic_read(&dev->counter)) 270 - mask = POLLIN | POLLRDNORM; 270 + mask = EPOLLIN | EPOLLRDNORM; 271 271 272 272 pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); 273 273
+1 -1
drivers/misc/vmw_vmci/vmci_host.c
··· 182 182 if (context->pending_datagrams > 0 || 183 183 vmci_handle_arr_get_size( 184 184 context->pending_doorbell_array) > 0) { 185 - mask = POLLIN; 185 + mask = EPOLLIN; 186 186 } 187 187 spin_unlock(&context->lock); 188 188 }
+2 -2
drivers/net/ieee802154/ca8210.c
··· 2648 2648 2649 2649 poll_wait(filp, &priv->test.readq, ptable); 2650 2650 if (!kfifo_is_empty(&priv->test.up_fifo)) 2651 - return_flags |= (POLLIN | POLLRDNORM); 2651 + return_flags |= (EPOLLIN | EPOLLRDNORM); 2652 2652 if (wait_event_interruptible( 2653 2653 priv->test.readq, 2654 2654 !kfifo_is_empty(&priv->test.up_fifo))) { 2655 - return POLLERR; 2655 + return EPOLLERR; 2656 2656 } 2657 2657 return return_flags; 2658 2658 }
+4 -4
drivers/net/ppp/ppp_generic.c
··· 539 539 if (!pf) 540 540 return 0; 541 541 poll_wait(file, &pf->rwait, wait); 542 - mask = POLLOUT | POLLWRNORM; 542 + mask = EPOLLOUT | EPOLLWRNORM; 543 543 if (skb_peek(&pf->rq)) 544 - mask |= POLLIN | POLLRDNORM; 544 + mask |= EPOLLIN | EPOLLRDNORM; 545 545 if (pf->dead) 546 - mask |= POLLHUP; 546 + mask |= EPOLLHUP; 547 547 else if (pf->kind == INTERFACE) { 548 548 /* see comment in ppp_read */ 549 549 struct ppp *ppp = PF_TO_PPP(pf); ··· 551 551 ppp_recv_lock(ppp); 552 552 if (ppp->n_channels == 0 && 553 553 (ppp->flags & SC_LOOP_TRAFFIC) == 0) 554 - mask |= POLLIN | POLLRDNORM; 554 + mask |= EPOLLIN | EPOLLRDNORM; 555 555 ppp_recv_unlock(ppp); 556 556 } 557 557
+5 -5
drivers/net/tap.c
··· 377 377 } 378 378 379 379 wake_up: 380 - wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); 380 + wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND); 381 381 return RX_HANDLER_CONSUMED; 382 382 383 383 drop: ··· 487 487 488 488 wqueue = sk_sleep(sk); 489 489 if (wqueue && waitqueue_active(wqueue)) 490 - wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND); 490 + wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); 491 491 } 492 492 493 493 static void tap_sock_destruct(struct sock *sk) ··· 572 572 static __poll_t tap_poll(struct file *file, poll_table *wait) 573 573 { 574 574 struct tap_queue *q = file->private_data; 575 - __poll_t mask = POLLERR; 575 + __poll_t mask = EPOLLERR; 576 576 577 577 if (!q) 578 578 goto out; ··· 581 581 poll_wait(file, &q->wq.wait, wait); 582 582 583 583 if (!ptr_ring_empty(&q->ring)) 584 - mask |= POLLIN | POLLRDNORM; 584 + mask |= EPOLLIN | EPOLLRDNORM; 585 585 586 586 if (sock_writeable(&q->sk) || 587 587 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && 588 588 sock_writeable(&q->sk))) 589 - mask |= POLLOUT | POLLWRNORM; 589 + mask |= EPOLLOUT | EPOLLWRNORM; 590 590 591 591 out: 592 592 return mask;
+6 -6
drivers/net/tun.c
··· 1437 1437 __poll_t mask = 0; 1438 1438 1439 1439 if (!tun) 1440 - return POLLERR; 1440 + return EPOLLERR; 1441 1441 1442 1442 sk = tfile->socket.sk; 1443 1443 ··· 1446 1446 poll_wait(file, sk_sleep(sk), wait); 1447 1447 1448 1448 if (!ptr_ring_empty(&tfile->tx_ring)) 1449 - mask |= POLLIN | POLLRDNORM; 1449 + mask |= EPOLLIN | EPOLLRDNORM; 1450 1450 1451 1451 if (tun->dev->flags & IFF_UP && 1452 1452 (sock_writeable(sk) || 1453 1453 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1454 1454 sock_writeable(sk)))) 1455 - mask |= POLLOUT | POLLWRNORM; 1455 + mask |= EPOLLOUT | EPOLLWRNORM; 1456 1456 1457 1457 if (tun->dev->reg_state != NETREG_REGISTERED) 1458 - mask = POLLERR; 1458 + mask = EPOLLERR; 1459 1459 1460 1460 tun_put(tun); 1461 1461 return mask; ··· 2310 2310 2311 2311 wqueue = sk_sleep(sk); 2312 2312 if (wqueue && waitqueue_active(wqueue)) 2313 - wake_up_interruptible_sync_poll(wqueue, POLLOUT | 2314 - POLLWRNORM | POLLWRBAND); 2313 + wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2314 + EPOLLWRNORM | EPOLLWRBAND); 2315 2315 2316 2316 tfile = container_of(sk, struct tun_file, sk); 2317 2317 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
+1 -1
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
··· 309 309 poll_wait(file, &intf->frame_dump_waitqueue, wait); 310 310 311 311 if (!skb_queue_empty(&intf->frame_dump_skbqueue)) 312 - return POLLOUT | POLLWRNORM; 312 + return EPOLLOUT | EPOLLWRNORM; 313 313 314 314 return 0; 315 315 }
+3 -3
drivers/pci/switch/switchtec.c
··· 511 511 poll_wait(filp, &stdev->event_wq, wait); 512 512 513 513 if (lock_mutex_and_test_alive(stdev)) 514 - return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP; 514 + return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP; 515 515 516 516 mutex_unlock(&stdev->mrpc_mutex); 517 517 518 518 if (try_wait_for_completion(&stuser->comp)) 519 - ret |= POLLIN | POLLRDNORM; 519 + ret |= EPOLLIN | EPOLLRDNORM; 520 520 521 521 if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) 522 - ret |= POLLPRI | POLLRDBAND; 522 + ret |= EPOLLPRI | EPOLLRDBAND; 523 523 524 524 return ret; 525 525 }
+1 -1
drivers/platform/chrome/cros_ec_debugfs.c
··· 200 200 if (CIRC_CNT(debug_info->log_buffer.head, 201 201 debug_info->log_buffer.tail, 202 202 LOG_SIZE)) 203 - mask |= POLLIN | POLLRDNORM; 203 + mask |= EPOLLIN | EPOLLRDNORM; 204 204 mutex_unlock(&debug_info->log_mutex); 205 205 206 206 return mask;
+4 -4
drivers/platform/goldfish/goldfish_pipe.c
··· 549 549 return -ERESTARTSYS; 550 550 551 551 if (status & PIPE_POLL_IN) 552 - mask |= POLLIN | POLLRDNORM; 552 + mask |= EPOLLIN | EPOLLRDNORM; 553 553 if (status & PIPE_POLL_OUT) 554 - mask |= POLLOUT | POLLWRNORM; 554 + mask |= EPOLLOUT | EPOLLWRNORM; 555 555 if (status & PIPE_POLL_HUP) 556 - mask |= POLLHUP; 556 + mask |= EPOLLHUP; 557 557 if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) 558 - mask |= POLLERR; 558 + mask |= EPOLLERR; 559 559 560 560 return mask; 561 561 }
+1 -1
drivers/platform/x86/sony-laptop.c
··· 4128 4128 { 4129 4129 poll_wait(file, &sonypi_compat.fifo_proc_list, wait); 4130 4130 if (kfifo_len(&sonypi_compat.fifo)) 4131 - return POLLIN | POLLRDNORM; 4131 + return EPOLLIN | EPOLLRDNORM; 4132 4132 return 0; 4133 4133 } 4134 4134
+1 -1
drivers/pps/pps.c
··· 55 55 56 56 poll_wait(file, &pps->queue, wait); 57 57 58 - return POLLIN | POLLRDNORM; 58 + return EPOLLIN | EPOLLRDNORM; 59 59 } 60 60 61 61 static int pps_cdev_fasync(int fd, struct file *file, int on)
+1 -1
drivers/ptp/ptp_chardev.c
··· 286 286 287 287 poll_wait(fp, &ptp->tsev_wq, wait); 288 288 289 - return queue_cnt(&ptp->tsevq) ? POLLIN : 0; 289 + return queue_cnt(&ptp->tsevq) ? EPOLLIN : 0; 290 290 } 291 291 292 292 #define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
+1 -1
drivers/rapidio/devices/rio_mport_cdev.c
··· 2325 2325 2326 2326 poll_wait(filp, &priv->event_rx_wait, wait); 2327 2327 if (kfifo_len(&priv->event_fifo)) 2328 - return POLLIN | POLLRDNORM; 2328 + return EPOLLIN | EPOLLRDNORM; 2329 2329 2330 2330 return 0; 2331 2331 }
+1 -1
drivers/rpmsg/qcom_smd.c
··· 967 967 poll_wait(filp, &channel->fblockread_event, wait); 968 968 969 969 if (qcom_smd_get_tx_avail(channel) > 20) 970 - mask |= POLLOUT | POLLWRNORM; 970 + mask |= EPOLLOUT | EPOLLWRNORM; 971 971 972 972 return mask; 973 973 }
+2 -2
drivers/rpmsg/rpmsg_char.c
··· 262 262 __poll_t mask = 0; 263 263 264 264 if (!eptdev->ept) 265 - return POLLERR; 265 + return EPOLLERR; 266 266 267 267 poll_wait(filp, &eptdev->readq, wait); 268 268 269 269 if (!skb_queue_empty(&eptdev->queue)) 270 - mask |= POLLIN | POLLRDNORM; 270 + mask |= EPOLLIN | EPOLLRDNORM; 271 271 272 272 mask |= rpmsg_poll(eptdev->ept, filp, wait); 273 273
+1 -1
drivers/rtc/rtc-dev.c
··· 203 203 204 204 data = rtc->irq_data; 205 205 206 - return (data != 0) ? (POLLIN | POLLRDNORM) : 0; 206 + return (data != 0) ? (EPOLLIN | EPOLLRDNORM) : 0; 207 207 } 208 208 209 209 static long rtc_dev_ioctl(struct file *file,
+1 -1
drivers/s390/block/dasd_eer.c
··· 671 671 poll_wait(filp, &dasd_eer_read_wait_queue, ptable); 672 672 spin_lock_irqsave(&bufferlock, flags); 673 673 if (eerb->head != eerb->tail) 674 - mask = POLLIN | POLLRDNORM ; 674 + mask = EPOLLIN | EPOLLRDNORM ; 675 675 else 676 676 mask = 0; 677 677 spin_unlock_irqrestore(&bufferlock, flags);
+2 -2
drivers/s390/char/monreader.c
··· 435 435 436 436 poll_wait(filp, &mon_read_wait_queue, p); 437 437 if (unlikely(atomic_read(&monpriv->iucv_severed))) 438 - return POLLERR; 438 + return EPOLLERR; 439 439 if (atomic_read(&monpriv->read_ready)) 440 - return POLLIN | POLLRDNORM; 440 + return EPOLLIN | EPOLLRDNORM; 441 441 return 0; 442 442 } 443 443
+1 -1
drivers/scsi/megaraid/megaraid_sas_base.c
··· 7041 7041 poll_wait(file, &megasas_poll_wait, wait); 7042 7042 spin_lock_irqsave(&poll_aen_lock, flags); 7043 7043 if (megasas_poll_wait_aen) 7044 - mask = (POLLIN | POLLRDNORM); 7044 + mask = (EPOLLIN | EPOLLRDNORM); 7045 7045 else 7046 7046 mask = 0; 7047 7047 megasas_poll_wait_aen = 0;
+1 -1
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 546 546 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 547 547 if (ioc->aen_event_read_flag) { 548 548 spin_unlock(&gioc_lock); 549 - return POLLIN | POLLRDNORM; 549 + return EPOLLIN | EPOLLRDNORM; 550 550 } 551 551 } 552 552 spin_unlock(&gioc_lock);
+6 -6
drivers/scsi/sg.c
··· 1152 1152 1153 1153 sfp = filp->private_data; 1154 1154 if (!sfp) 1155 - return POLLERR; 1155 + return EPOLLERR; 1156 1156 sdp = sfp->parentdp; 1157 1157 if (!sdp) 1158 - return POLLERR; 1158 + return EPOLLERR; 1159 1159 poll_wait(filp, &sfp->read_wait, wait); 1160 1160 read_lock_irqsave(&sfp->rq_list_lock, iflags); 1161 1161 list_for_each_entry(srp, &sfp->rq_list, entry) { 1162 1162 /* if any read waiting, flag it */ 1163 1163 if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) 1164 - res = POLLIN | POLLRDNORM; 1164 + res = EPOLLIN | EPOLLRDNORM; 1165 1165 ++count; 1166 1166 } 1167 1167 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 1168 1168 1169 1169 if (atomic_read(&sdp->detaching)) 1170 - res |= POLLHUP; 1170 + res |= EPOLLHUP; 1171 1171 else if (!sfp->cmd_q) { 1172 1172 if (0 == count) 1173 - res |= POLLOUT | POLLWRNORM; 1173 + res |= EPOLLOUT | EPOLLWRNORM; 1174 1174 } else if (count < SG_MAX_QUEUE) 1175 - res |= POLLOUT | POLLWRNORM; 1175 + res |= EPOLLOUT | EPOLLWRNORM; 1176 1176 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, 1177 1177 "sg_poll: res=0x%x\n", (__force u32) res)); 1178 1178 return res;
+2 -2
drivers/staging/comedi/comedi_fops.c
··· 2288 2288 if (s->busy != file || !comedi_is_subdevice_running(s) || 2289 2289 (s->async->cmd.flags & CMDF_WRITE) || 2290 2290 comedi_buf_read_n_available(s) > 0) 2291 - mask |= POLLIN | POLLRDNORM; 2291 + mask |= EPOLLIN | EPOLLRDNORM; 2292 2292 } 2293 2293 2294 2294 s = comedi_file_write_subdevice(file); ··· 2300 2300 if (s->busy != file || !comedi_is_subdevice_running(s) || 2301 2301 !(s->async->cmd.flags & CMDF_WRITE) || 2302 2302 comedi_buf_write_n_available(s) >= bps) 2303 - mask |= POLLOUT | POLLWRNORM; 2303 + mask |= EPOLLOUT | EPOLLWRNORM; 2304 2304 } 2305 2305 2306 2306 done:
+2 -2
drivers/staging/comedi/drivers/serial2002.c
··· 114 114 __poll_t mask; 115 115 116 116 mask = f->f_op->poll(f, &table.pt); 117 - if (mask & (POLLRDNORM | POLLRDBAND | POLLIN | 118 - POLLHUP | POLLERR)) { 117 + if (mask & (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | 118 + EPOLLHUP | EPOLLERR)) { 119 119 break; 120 120 } 121 121 now = ktime_get();
+1 -1
drivers/staging/fwserial/fwserial.c
··· 37 37 /* 38 38 * Threshold below which the tty is woken for writing 39 39 * - should be equal to WAKEUP_CHARS in drivers/tty/n_tty.c because 40 - * even if the writer is woken, n_tty_poll() won't set POLLOUT until 40 + * even if the writer is woken, n_tty_poll() won't set EPOLLOUT until 41 41 * our fifo is below this level 42 42 */ 43 43 #define WAKEUP_CHARS 256
+2 -2
drivers/staging/greybus/tools/loopback_test.c
··· 663 663 goto err; 664 664 } 665 665 read(t->fds[fds_idx].fd, &dummy, 1); 666 - t->fds[fds_idx].events = POLLERR|POLLPRI; 666 + t->fds[fds_idx].events = EPOLLERR|EPOLLPRI; 667 667 t->fds[fds_idx].revents = 0; 668 668 fds_idx++; 669 669 } ··· 756 756 } 757 757 758 758 for (i = 0; i < t->poll_count; i++) { 759 - if (t->fds[i].revents & POLLPRI) { 759 + if (t->fds[i].revents & EPOLLPRI) { 760 760 /* Dummy read to clear the event */ 761 761 read(t->fds[i].fd, &dummy, 1); 762 762 number_of_events++;
+7 -7
drivers/staging/irda/net/af_irda.c
··· 1749 1749 1750 1750 /* Exceptional events? */ 1751 1751 if (sk->sk_err) 1752 - mask |= POLLERR; 1752 + mask |= EPOLLERR; 1753 1753 if (sk->sk_shutdown & RCV_SHUTDOWN) { 1754 1754 pr_debug("%s(), POLLHUP\n", __func__); 1755 - mask |= POLLHUP; 1755 + mask |= EPOLLHUP; 1756 1756 } 1757 1757 1758 1758 /* Readable? */ 1759 1759 if (!skb_queue_empty(&sk->sk_receive_queue)) { 1760 1760 pr_debug("Socket is readable\n"); 1761 - mask |= POLLIN | POLLRDNORM; 1761 + mask |= EPOLLIN | EPOLLRDNORM; 1762 1762 } 1763 1763 1764 1764 /* Connection-based need to check for termination and startup */ ··· 1766 1766 case SOCK_STREAM: 1767 1767 if (sk->sk_state == TCP_CLOSE) { 1768 1768 pr_debug("%s(), POLLHUP\n", __func__); 1769 - mask |= POLLHUP; 1769 + mask |= EPOLLHUP; 1770 1770 } 1771 1771 1772 1772 if (sk->sk_state == TCP_ESTABLISHED) { 1773 1773 if ((self->tx_flow == FLOW_START) && 1774 1774 sock_writeable(sk)) 1775 1775 { 1776 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1776 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 1777 1777 } 1778 1778 } 1779 1779 break; ··· 1781 1781 if ((self->tx_flow == FLOW_START) && 1782 1782 sock_writeable(sk)) 1783 1783 { 1784 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1784 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 1785 1785 } 1786 1786 break; 1787 1787 case SOCK_DGRAM: 1788 1788 if (sock_writeable(sk)) 1789 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1789 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 1790 1790 break; 1791 1791 default: 1792 1792 break;
+4 -4
drivers/staging/irda/net/irnet/irnet_ppp.c
··· 429 429 DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap); 430 430 431 431 poll_wait(file, &irnet_events.rwait, wait); 432 - mask = POLLOUT | POLLWRNORM; 432 + mask = EPOLLOUT | EPOLLWRNORM; 433 433 /* If there is unread events */ 434 434 if(ap->event_index != irnet_events.index) 435 - mask |= POLLIN | POLLRDNORM; 435 + mask |= EPOLLIN | EPOLLRDNORM; 436 436 #ifdef INITIAL_DISCOVERY 437 437 if(ap->disco_number != -1) 438 438 { ··· 441 441 irnet_get_discovery_log(ap); 442 442 /* Recheck */ 443 443 if(ap->disco_number != -1) 444 - mask |= POLLIN | POLLRDNORM; 444 + mask |= EPOLLIN | EPOLLRDNORM; 445 445 } 446 446 #endif /* INITIAL_DISCOVERY */ 447 447 ··· 618 618 DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", 619 619 file, ap); 620 620 621 - mask = POLLOUT | POLLWRNORM; 621 + mask = EPOLLOUT | EPOLLWRNORM; 622 622 DABORT(ap == NULL, mask, FS_ERROR, "ap is NULL !!!\n"); 623 623 624 624 /* If we are connected to ppp_generic, let it handle the job */
+1 -1
drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
··· 1265 1265 rt_mutex_lock(&isp->mutex); 1266 1266 if (pipe->capq.streaming != 1) { 1267 1267 rt_mutex_unlock(&isp->mutex); 1268 - return POLLERR; 1268 + return EPOLLERR; 1269 1269 } 1270 1270 rt_mutex_unlock(&isp->mutex); 1271 1271
+1 -1
drivers/staging/media/bcm2048/radio-bcm2048.c
··· 2183 2183 poll_wait(file, &bdev->read_queue, pts); 2184 2184 2185 2185 if (bdev->rds_data_available) 2186 - retval = POLLIN | POLLRDNORM; 2186 + retval = EPOLLIN | EPOLLRDNORM; 2187 2187 2188 2188 return retval; 2189 2189 }
+2 -2
drivers/staging/most/cdev/cdev.c
··· 292 292 293 293 if (c->cfg->direction == MOST_CH_RX) { 294 294 if (!kfifo_is_empty(&c->fifo)) 295 - mask |= POLLIN | POLLRDNORM; 295 + mask |= EPOLLIN | EPOLLRDNORM; 296 296 } else { 297 297 if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c)) 298 - mask |= POLLOUT | POLLWRNORM; 298 + mask |= EPOLLOUT | EPOLLWRNORM; 299 299 } 300 300 return mask; 301 301 }
+1 -1
drivers/staging/most/video/video.c
··· 213 213 if (!data_ready(mdev)) 214 214 poll_wait(filp, &mdev->wait_data, wait); 215 215 if (data_ready(mdev)) 216 - mask |= POLLIN | POLLRDNORM; 216 + mask |= EPOLLIN | EPOLLRDNORM; 217 217 218 218 return mask; 219 219 }
+1 -1
drivers/staging/speakup/speakup_soft.c
··· 325 325 326 326 spin_lock_irqsave(&speakup_info.spinlock, flags); 327 327 if (!synth_buffer_empty() || speakup_info.flushing) 328 - ret = POLLIN | POLLRDNORM; 328 + ret = EPOLLIN | EPOLLRDNORM; 329 329 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 330 330 return ret; 331 331 }
+3 -3
drivers/tty/n_gsm.c
··· 2477 2477 poll_wait(file, &tty->read_wait, wait); 2478 2478 poll_wait(file, &tty->write_wait, wait); 2479 2479 if (tty_hung_up_p(file)) 2480 - mask |= POLLHUP; 2480 + mask |= EPOLLHUP; 2481 2481 if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0) 2482 - mask |= POLLOUT | POLLWRNORM; 2482 + mask |= EPOLLOUT | EPOLLWRNORM; 2483 2483 if (gsm->dead) 2484 - mask |= POLLHUP; 2484 + mask |= EPOLLHUP; 2485 2485 return mask; 2486 2486 } 2487 2487
+4 -4
drivers/tty/n_hdlc.c
··· 814 814 815 815 /* set bits for operations that won't block */ 816 816 if (!list_empty(&n_hdlc->rx_buf_list.list)) 817 - mask |= POLLIN | POLLRDNORM; /* readable */ 817 + mask |= EPOLLIN | EPOLLRDNORM; /* readable */ 818 818 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) 819 - mask |= POLLHUP; 819 + mask |= EPOLLHUP; 820 820 if (tty_hung_up_p(filp)) 821 - mask |= POLLHUP; 821 + mask |= EPOLLHUP; 822 822 if (!tty_is_writelocked(tty) && 823 823 !list_empty(&n_hdlc->tx_free_buf_list.list)) 824 - mask |= POLLOUT | POLLWRNORM; /* writable */ 824 + mask |= EPOLLOUT | EPOLLWRNORM; /* writable */ 825 825 } 826 826 return mask; 827 827 } /* end of n_hdlc_tty_poll() */
+2 -2
drivers/tty/n_r3964.c
··· 1223 1223 struct r3964_client_info *pClient; 1224 1224 struct r3964_message *pMsg = NULL; 1225 1225 unsigned long flags; 1226 - __poll_t result = POLLOUT; 1226 + __poll_t result = EPOLLOUT; 1227 1227 1228 1228 TRACE_L("POLL"); 1229 1229 ··· 1234 1234 pMsg = pClient->first_msg; 1235 1235 spin_unlock_irqrestore(&pInfo->lock, flags); 1236 1236 if (pMsg) 1237 - result |= POLLIN | POLLRDNORM; 1237 + result |= EPOLLIN | EPOLLRDNORM; 1238 1238 } else { 1239 1239 result = -EINVAL; 1240 1240 }
+8 -8
drivers/tty/n_tty.c
··· 1344 1344 put_tty_queue(c, ldata); 1345 1345 smp_store_release(&ldata->canon_head, ldata->read_head); 1346 1346 kill_fasync(&tty->fasync, SIGIO, POLL_IN); 1347 - wake_up_interruptible_poll(&tty->read_wait, POLLIN); 1347 + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); 1348 1348 return 0; 1349 1349 } 1350 1350 } ··· 1625 1625 1626 1626 if (read_cnt(ldata)) { 1627 1627 kill_fasync(&tty->fasync, SIGIO, POLL_IN); 1628 - wake_up_interruptible_poll(&tty->read_wait, POLLIN); 1628 + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); 1629 1629 } 1630 1630 } 1631 1631 ··· 2376 2376 poll_wait(file, &tty->read_wait, wait); 2377 2377 poll_wait(file, &tty->write_wait, wait); 2378 2378 if (input_available_p(tty, 1)) 2379 - mask |= POLLIN | POLLRDNORM; 2379 + mask |= EPOLLIN | EPOLLRDNORM; 2380 2380 else { 2381 2381 tty_buffer_flush_work(tty->port); 2382 2382 if (input_available_p(tty, 1)) 2383 - mask |= POLLIN | POLLRDNORM; 2383 + mask |= EPOLLIN | EPOLLRDNORM; 2384 2384 } 2385 2385 if (tty->packet && tty->link->ctrl_status) 2386 - mask |= POLLPRI | POLLIN | POLLRDNORM; 2386 + mask |= EPOLLPRI | EPOLLIN | EPOLLRDNORM; 2387 2387 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) 2388 - mask |= POLLHUP; 2388 + mask |= EPOLLHUP; 2389 2389 if (tty_hung_up_p(file)) 2390 - mask |= POLLHUP; 2390 + mask |= EPOLLHUP; 2391 2391 if (tty->ops->write && !tty_is_writelocked(tty) && 2392 2392 tty_chars_in_buffer(tty) < WAKEUP_CHARS && 2393 2393 tty_write_room(tty) > 0) 2394 - mask |= POLLOUT | POLLWRNORM; 2394 + mask |= EPOLLOUT | EPOLLWRNORM; 2395 2395 return mask; 2396 2396 } 2397 2397
+2 -2
drivers/tty/pty.c
··· 344 344 tty->ctrl_status &= ~TIOCPKT_STOP; 345 345 tty->ctrl_status |= TIOCPKT_START; 346 346 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 347 - wake_up_interruptible_poll(&tty->link->read_wait, POLLIN); 347 + wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN); 348 348 } 349 349 } 350 350 ··· 357 357 tty->ctrl_status &= ~TIOCPKT_START; 358 358 tty->ctrl_status |= TIOCPKT_STOP; 359 359 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 360 - wake_up_interruptible_poll(&tty->link->read_wait, POLLIN); 360 + wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN); 361 361 } 362 362 } 363 363
+7 -7
drivers/tty/tty_io.c
··· 445 445 /* No kernel lock held - none needed ;) */ 446 446 static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait) 447 447 { 448 - return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; 448 + return EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | EPOLLWRNORM; 449 449 } 450 450 451 451 static long hung_up_tty_ioctl(struct file *file, unsigned int cmd, ··· 533 533 tty_ldisc_deref(ld); 534 534 } 535 535 } 536 - wake_up_interruptible_poll(&tty->write_wait, POLLOUT); 536 + wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT); 537 537 } 538 538 539 539 EXPORT_SYMBOL_GPL(tty_wakeup); ··· 867 867 static void tty_write_unlock(struct tty_struct *tty) 868 868 { 869 869 mutex_unlock(&tty->atomic_write_lock); 870 - wake_up_interruptible_poll(&tty->write_wait, POLLOUT); 870 + wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT); 871 871 } 872 872 873 873 static int tty_write_lock(struct tty_struct *tty, int ndelay) ··· 1667 1667 1668 1668 if (tty->count <= 1) { 1669 1669 if (waitqueue_active(&tty->read_wait)) { 1670 - wake_up_poll(&tty->read_wait, POLLIN); 1670 + wake_up_poll(&tty->read_wait, EPOLLIN); 1671 1671 do_sleep++; 1672 1672 } 1673 1673 if (waitqueue_active(&tty->write_wait)) { 1674 - wake_up_poll(&tty->write_wait, POLLOUT); 1674 + wake_up_poll(&tty->write_wait, EPOLLOUT); 1675 1675 do_sleep++; 1676 1676 } 1677 1677 } 1678 1678 if (o_tty && o_tty->count <= 1) { 1679 1679 if (waitqueue_active(&o_tty->read_wait)) { 1680 - wake_up_poll(&o_tty->read_wait, POLLIN); 1680 + wake_up_poll(&o_tty->read_wait, EPOLLIN); 1681 1681 do_sleep++; 1682 1682 } 1683 1683 if (waitqueue_active(&o_tty->write_wait)) { 1684 - wake_up_poll(&o_tty->write_wait, POLLOUT); 1684 + wake_up_poll(&o_tty->write_wait, EPOLLOUT); 1685 1685 do_sleep++; 1686 1686 } 1687 1687 }
+2 -2
drivers/tty/tty_ldisc.c
··· 735 735 tty_ldisc_deref(ld); 736 736 } 737 737 738 - wake_up_interruptible_poll(&tty->write_wait, POLLOUT); 739 - wake_up_interruptible_poll(&tty->read_wait, POLLIN); 738 + wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT); 739 + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); 740 740 741 741 /* 742 742 * Shutdown the current line discipline, and reset it to
+1 -1
drivers/tty/vt/vc_screen.c
··· 563 563 vcs_poll(struct file *file, poll_table *wait) 564 564 { 565 565 struct vcs_poll_data *poll = vcs_poll_data_get(file); 566 - __poll_t ret = DEFAULT_POLLMASK|POLLERR|POLLPRI; 566 + __poll_t ret = DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI; 567 567 568 568 if (poll) { 569 569 poll_wait(file, &poll->waitq, wait);
+1 -1
drivers/uio/uio.c
··· 506 506 507 507 poll_wait(filep, &idev->wait, wait); 508 508 if (listener->event_count != atomic_read(&idev->event)) 509 - return POLLIN | POLLRDNORM; 509 + return EPOLLIN | EPOLLRDNORM; 510 510 return 0; 511 511 } 512 512
+4 -4
drivers/usb/class/cdc-wdm.c
··· 603 603 604 604 spin_lock_irqsave(&desc->iuspin, flags); 605 605 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { 606 - mask = POLLHUP | POLLERR; 606 + mask = EPOLLHUP | EPOLLERR; 607 607 spin_unlock_irqrestore(&desc->iuspin, flags); 608 608 goto desc_out; 609 609 } 610 610 if (test_bit(WDM_READ, &desc->flags)) 611 - mask = POLLIN | POLLRDNORM; 611 + mask = EPOLLIN | EPOLLRDNORM; 612 612 if (desc->rerr || desc->werr) 613 - mask |= POLLERR; 613 + mask |= EPOLLERR; 614 614 if (!test_bit(WDM_IN_USE, &desc->flags)) 615 - mask |= POLLOUT | POLLWRNORM; 615 + mask |= EPOLLOUT | EPOLLWRNORM; 616 616 spin_unlock_irqrestore(&desc->iuspin, flags); 617 617 618 618 poll_wait(file, &desc->wait, wait);
+2 -2
drivers/usb/class/usblp.c
··· 479 479 poll_wait(file, &usblp->rwait, wait); 480 480 poll_wait(file, &usblp->wwait, wait); 481 481 spin_lock_irqsave(&usblp->lock, flags); 482 - ret = ((usblp->bidir && usblp->rcomplete) ? POLLIN | POLLRDNORM : 0) | 483 - ((usblp->no_paper || usblp->wcomplete) ? POLLOUT | POLLWRNORM : 0); 482 + ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN | EPOLLRDNORM : 0) | 483 + ((usblp->no_paper || usblp->wcomplete) ? EPOLLOUT | EPOLLWRNORM : 0); 484 484 spin_unlock_irqrestore(&usblp->lock, flags); 485 485 return ret; 486 486 }
+2 -2
drivers/usb/class/usbtmc.c
··· 1265 1265 mutex_lock(&data->io_mutex); 1266 1266 1267 1267 if (data->zombie) { 1268 - mask = POLLHUP | POLLERR; 1268 + mask = EPOLLHUP | EPOLLERR; 1269 1269 goto no_poll; 1270 1270 } 1271 1271 1272 1272 poll_wait(file, &data->waitq, wait); 1273 1273 1274 - mask = (atomic_read(&data->srq_asserted)) ? POLLIN | POLLRDNORM : 0; 1274 + mask = (atomic_read(&data->srq_asserted)) ? EPOLLIN | EPOLLRDNORM : 0; 1275 1275 1276 1276 no_poll: 1277 1277 mutex_unlock(&data->io_mutex);
+1 -1
drivers/usb/core/devices.c
··· 632 632 event_count = atomic_read(&device_event.count); 633 633 if (file->f_version != event_count) { 634 634 file->f_version = event_count; 635 - return POLLIN | POLLRDNORM; 635 + return EPOLLIN | EPOLLRDNORM; 636 636 } 637 637 638 638 return 0;
+3 -3
drivers/usb/core/devio.c
··· 2578 2578 2579 2579 poll_wait(file, &ps->wait, wait); 2580 2580 if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) 2581 - mask |= POLLOUT | POLLWRNORM; 2581 + mask |= EPOLLOUT | EPOLLWRNORM; 2582 2582 if (!connected(ps)) 2583 - mask |= POLLHUP; 2583 + mask |= EPOLLHUP; 2584 2584 if (list_empty(&ps->list)) 2585 - mask |= POLLERR; 2585 + mask |= EPOLLERR; 2586 2586 return mask; 2587 2587 } 2588 2588
+4 -4
drivers/usb/gadget/function/f_fs.c
··· 644 644 static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait) 645 645 { 646 646 struct ffs_data *ffs = file->private_data; 647 - __poll_t mask = POLLWRNORM; 647 + __poll_t mask = EPOLLWRNORM; 648 648 int ret; 649 649 650 650 poll_wait(file, &ffs->ev.waitq, wait); ··· 656 656 switch (ffs->state) { 657 657 case FFS_READ_DESCRIPTORS: 658 658 case FFS_READ_STRINGS: 659 - mask |= POLLOUT; 659 + mask |= EPOLLOUT; 660 660 break; 661 661 662 662 case FFS_ACTIVE: 663 663 switch (ffs->setup_state) { 664 664 case FFS_NO_SETUP: 665 665 if (ffs->ev.count) 666 - mask |= POLLIN; 666 + mask |= EPOLLIN; 667 667 break; 668 668 669 669 case FFS_SETUP_PENDING: 670 670 case FFS_SETUP_CANCELLED: 671 - mask |= (POLLIN | POLLOUT); 671 + mask |= (EPOLLIN | EPOLLOUT); 672 672 break; 673 673 } 674 674 case FFS_CLOSING:
+2 -2
drivers/usb/gadget/function/f_hid.c
··· 422 422 poll_wait(file, &hidg->write_queue, wait); 423 423 424 424 if (WRITE_COND) 425 - ret |= POLLOUT | POLLWRNORM; 425 + ret |= EPOLLOUT | EPOLLWRNORM; 426 426 427 427 if (READ_COND) 428 - ret |= POLLIN | POLLRDNORM; 428 + ret |= EPOLLIN | EPOLLRDNORM; 429 429 430 430 return ret; 431 431 }
+2 -2
drivers/usb/gadget/function/f_printer.c
··· 698 698 699 699 spin_lock_irqsave(&dev->lock, flags); 700 700 if (likely(!list_empty(&dev->tx_reqs))) 701 - status |= POLLOUT | POLLWRNORM; 701 + status |= EPOLLOUT | EPOLLWRNORM; 702 702 703 703 if (likely(dev->current_rx_bytes) || 704 704 likely(!list_empty(&dev->rx_buffers))) 705 - status |= POLLIN | POLLRDNORM; 705 + status |= EPOLLIN | EPOLLRDNORM; 706 706 707 707 spin_unlock_irqrestore(&dev->lock, flags); 708 708
+3 -3
drivers/usb/gadget/legacy/inode.c
··· 1225 1225 /* report fd mode change before acting on it */ 1226 1226 if (dev->setup_abort) { 1227 1227 dev->setup_abort = 0; 1228 - mask = POLLHUP; 1228 + mask = EPOLLHUP; 1229 1229 goto out; 1230 1230 } 1231 1231 1232 1232 if (dev->state == STATE_DEV_SETUP) { 1233 1233 if (dev->setup_in || dev->setup_can_stall) 1234 - mask = POLLOUT; 1234 + mask = EPOLLOUT; 1235 1235 } else { 1236 1236 if (dev->ev_next != 0) 1237 - mask = POLLIN; 1237 + mask = EPOLLIN; 1238 1238 } 1239 1239 out: 1240 1240 spin_unlock_irq(&dev->lock);
+4 -4
drivers/usb/misc/iowarrior.c
··· 683 683 __poll_t mask = 0; 684 684 685 685 if (!dev->present) 686 - return POLLERR | POLLHUP; 686 + return EPOLLERR | EPOLLHUP; 687 687 688 688 poll_wait(file, &dev->read_wait, wait); 689 689 poll_wait(file, &dev->write_wait, wait); 690 690 691 691 if (!dev->present) 692 - return POLLERR | POLLHUP; 692 + return EPOLLERR | EPOLLHUP; 693 693 694 694 if (read_index(dev) != -1) 695 - mask |= POLLIN | POLLRDNORM; 695 + mask |= EPOLLIN | EPOLLRDNORM; 696 696 697 697 if (atomic_read(&dev->write_busy) < MAX_WRITES_IN_FLIGHT) 698 - mask |= POLLOUT | POLLWRNORM; 698 + mask |= EPOLLOUT | EPOLLWRNORM; 699 699 return mask; 700 700 } 701 701
+3 -3
drivers/usb/misc/ldusb.c
··· 417 417 dev = file->private_data; 418 418 419 419 if (!dev->intf) 420 - return POLLERR | POLLHUP; 420 + return EPOLLERR | EPOLLHUP; 421 421 422 422 poll_wait(file, &dev->read_wait, wait); 423 423 poll_wait(file, &dev->write_wait, wait); 424 424 425 425 if (dev->ring_head != dev->ring_tail) 426 - mask |= POLLIN | POLLRDNORM; 426 + mask |= EPOLLIN | EPOLLRDNORM; 427 427 if (!dev->interrupt_out_busy) 428 - mask |= POLLOUT | POLLWRNORM; 428 + mask |= EPOLLOUT | EPOLLWRNORM; 429 429 430 430 return mask; 431 431 }
+3 -3
drivers/usb/misc/legousbtower.c
··· 517 517 dev = file->private_data; 518 518 519 519 if (!dev->udev) 520 - return POLLERR | POLLHUP; 520 + return EPOLLERR | EPOLLHUP; 521 521 522 522 poll_wait(file, &dev->read_wait, wait); 523 523 poll_wait(file, &dev->write_wait, wait); 524 524 525 525 tower_check_for_read_packet(dev); 526 526 if (dev->read_packet_length > 0) { 527 - mask |= POLLIN | POLLRDNORM; 527 + mask |= EPOLLIN | EPOLLRDNORM; 528 528 } 529 529 if (!dev->interrupt_out_busy) { 530 - mask |= POLLOUT | POLLWRNORM; 530 + mask |= EPOLLOUT | EPOLLWRNORM; 531 531 } 532 532 533 533 return mask;
+1 -1
drivers/usb/mon/mon_bin.c
··· 1203 1203 1204 1204 spin_lock_irqsave(&rp->b_lock, flags); 1205 1205 if (!MON_RING_EMPTY(rp)) 1206 - mask |= POLLIN | POLLRDNORM; /* readable */ 1206 + mask |= EPOLLIN | EPOLLRDNORM; /* readable */ 1207 1207 spin_unlock_irqrestore(&rp->b_lock, flags); 1208 1208 return mask; 1209 1209 }
+4 -4
drivers/vfio/virqfd.c
··· 48 48 struct virqfd *virqfd = container_of(wait, struct virqfd, wait); 49 49 __poll_t flags = key_to_poll(key); 50 50 51 - if (flags & POLLIN) { 51 + if (flags & EPOLLIN) { 52 52 /* An event has been signaled, call function */ 53 53 if ((!virqfd->handler || 54 54 virqfd->handler(virqfd->opaque, virqfd->data)) && ··· 56 56 schedule_work(&virqfd->inject); 57 57 } 58 58 59 - if (flags & POLLHUP) { 59 + if (flags & EPOLLHUP) { 60 60 unsigned long flags; 61 61 spin_lock_irqsave(&virqfd_lock, flags); 62 62 ··· 172 172 * Check if there was an event already pending on the eventfd 173 173 * before we registered and trigger it as if we didn't miss it. 174 174 */ 175 - if (events & POLLIN) { 175 + if (events & EPOLLIN) { 176 176 if ((!handler || handler(opaque, data)) && thread) 177 177 schedule_work(&virqfd->inject); 178 178 } 179 179 180 180 /* 181 181 * Do not drop the file until the irqfd is fully initialized, 182 - * otherwise we might race against the POLLHUP. 182 + * otherwise we might race against the EPOLLHUP. 183 183 */ 184 184 fdput(irqfd); 185 185
+2 -2
drivers/vhost/net.c
··· 952 952 } 953 953 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); 954 954 955 - vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev); 956 - vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev); 955 + vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); 956 + vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); 957 957 958 958 f->private_data = n; 959 959
+5 -5
drivers/vhost/vhost.c
··· 211 211 mask = file->f_op->poll(file, &poll->table); 212 212 if (mask) 213 213 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); 214 - if (mask & POLLERR) { 214 + if (mask & EPOLLERR) { 215 215 if (poll->wqh) 216 216 remove_wait_queue(poll->wqh, &poll->wait); 217 217 ret = -EINVAL; ··· 440 440 vhost_vq_reset(dev, vq); 441 441 if (vq->handle_kick) 442 442 vhost_poll_init(&vq->poll, vq->handle_kick, 443 - POLLIN, dev); 443 + EPOLLIN, dev); 444 444 } 445 445 } 446 446 EXPORT_SYMBOL_GPL(vhost_dev_init); ··· 630 630 vhost_umem_clean(dev->iotlb); 631 631 dev->iotlb = NULL; 632 632 vhost_clear_msg(dev); 633 - wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM); 633 + wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); 634 634 WARN_ON(!llist_empty(&dev->work_list)); 635 635 if (dev->worker) { 636 636 kthread_stop(dev->worker); ··· 1057 1057 poll_wait(file, &dev->wait, wait); 1058 1058 1059 1059 if (!list_empty(&dev->read_list)) 1060 - mask |= POLLIN | POLLRDNORM; 1060 + mask |= EPOLLIN | EPOLLRDNORM; 1061 1061 1062 1062 return mask; 1063 1063 } ··· 2356 2356 list_add_tail(&node->node, head); 2357 2357 spin_unlock(&dev->iotlb_lock); 2358 2358 2359 - wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM); 2359 + wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); 2360 2360 } 2361 2361 EXPORT_SYMBOL_GPL(vhost_enqueue_msg); 2362 2362
+1 -1
drivers/virt/fsl_hypervisor.c
··· 574 574 spin_lock_irqsave(&dbq->lock, flags); 575 575 576 576 poll_wait(filp, &dbq->wait, p); 577 - mask = (dbq->head == dbq->tail) ? 0 : (POLLIN | POLLRDNORM); 577 + mask = (dbq->head == dbq->tail) ? 0 : (EPOLLIN | EPOLLRDNORM); 578 578 579 579 spin_unlock_irqrestore(&dbq->lock, flags); 580 580
+3 -3
drivers/xen/evtchn.c
··· 623 623 624 624 static __poll_t evtchn_poll(struct file *file, poll_table *wait) 625 625 { 626 - __poll_t mask = POLLOUT | POLLWRNORM; 626 + __poll_t mask = EPOLLOUT | EPOLLWRNORM; 627 627 struct per_user_data *u = file->private_data; 628 628 629 629 poll_wait(file, &u->evtchn_wait, wait); 630 630 if (u->ring_cons != u->ring_prod) 631 - mask |= POLLIN | POLLRDNORM; 631 + mask |= EPOLLIN | EPOLLRDNORM; 632 632 if (u->ring_overflow) 633 - mask = POLLERR; 633 + mask = EPOLLERR; 634 634 return mask; 635 635 } 636 636
+1 -1
drivers/xen/mcelog.c
··· 144 144 poll_wait(file, &xen_mce_chrdev_wait, wait); 145 145 146 146 if (xen_mcelog.next) 147 - return POLLIN | POLLRDNORM; 147 + return EPOLLIN | EPOLLRDNORM; 148 148 149 149 return 0; 150 150 }
+7 -7
drivers/xen/pvcalls-front.c
··· 892 892 893 893 if (req_id != PVCALLS_INVALID_ID && 894 894 READ_ONCE(bedata->rsp[req_id].req_id) == req_id) 895 - return POLLIN | POLLRDNORM; 895 + return EPOLLIN | EPOLLRDNORM; 896 896 897 897 poll_wait(file, &map->passive.inflight_accept_req, wait); 898 898 return 0; ··· 900 900 901 901 if (test_and_clear_bit(PVCALLS_FLAG_POLL_RET, 902 902 (void *)&map->passive.flags)) 903 - return POLLIN | POLLRDNORM; 903 + return EPOLLIN | EPOLLRDNORM; 904 904 905 905 /* 906 906 * First check RET, then INFLIGHT. No barriers necessary to ··· 949 949 950 950 poll_wait(file, &map->active.inflight_conn_req, wait); 951 951 if (pvcalls_front_write_todo(map)) 952 - mask |= POLLOUT | POLLWRNORM; 952 + mask |= EPOLLOUT | EPOLLWRNORM; 953 953 if (pvcalls_front_read_todo(map)) 954 - mask |= POLLIN | POLLRDNORM; 954 + mask |= EPOLLIN | EPOLLRDNORM; 955 955 if (in_error != 0 || out_error != 0) 956 - mask |= POLLERR; 956 + mask |= EPOLLERR; 957 957 958 958 return mask; 959 959 } ··· 968 968 pvcalls_enter(); 969 969 if (!pvcalls_front_dev) { 970 970 pvcalls_exit(); 971 - return POLLNVAL; 971 + return EPOLLNVAL; 972 972 } 973 973 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 974 974 975 975 map = (struct sock_mapping *) sock->sk->sk_send_head; 976 976 if (!map) { 977 977 pvcalls_exit(); 978 - return POLLNVAL; 978 + return EPOLLNVAL; 979 979 } 980 980 if (map->active_socket) 981 981 ret = pvcalls_front_poll_active(file, bedata, map, wait);
+1 -1
drivers/xen/xenbus/xenbus_dev_frontend.c
··· 651 651 652 652 poll_wait(file, &u->read_waitq, wait); 653 653 if (!list_empty(&u->read_buffers)) 654 - return POLLIN | POLLRDNORM; 654 + return EPOLLIN | EPOLLRDNORM; 655 655 return 0; 656 656 } 657 657
+3 -3
fs/cachefiles/daemon.c
··· 289 289 290 290 /* 291 291 * poll for culling state 292 - * - use POLLOUT to indicate culling state 292 + * - use EPOLLOUT to indicate culling state 293 293 */ 294 294 static __poll_t cachefiles_daemon_poll(struct file *file, 295 295 struct poll_table_struct *poll) ··· 301 301 mask = 0; 302 302 303 303 if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) 304 - mask |= POLLIN; 304 + mask |= EPOLLIN; 305 305 306 306 if (test_bit(CACHEFILES_CULLING, &cache->flags)) 307 - mask |= POLLOUT; 307 + mask |= EPOLLOUT; 308 308 309 309 return mask; 310 310 }
+2 -2
fs/coda/psdev.c
··· 64 64 static __poll_t coda_psdev_poll(struct file *file, poll_table * wait) 65 65 { 66 66 struct venus_comm *vcp = (struct venus_comm *) file->private_data; 67 - __poll_t mask = POLLOUT | POLLWRNORM; 67 + __poll_t mask = EPOLLOUT | EPOLLWRNORM; 68 68 69 69 poll_wait(file, &vcp->vc_waitq, wait); 70 70 mutex_lock(&vcp->vc_mutex); 71 71 if (!list_empty(&vcp->vc_pending)) 72 - mask |= POLLIN | POLLRDNORM; 72 + mask |= EPOLLIN | EPOLLRDNORM; 73 73 mutex_unlock(&vcp->vc_mutex); 74 74 75 75 return mask;
+1 -1
fs/debugfs/file.c
··· 214 214 const struct file_operations *real_fops; 215 215 216 216 if (debugfs_file_get(dentry)) 217 - return POLLHUP; 217 + return EPOLLHUP; 218 218 219 219 real_fops = debugfs_real_fops(filp); 220 220 r = real_fops->poll(filp, wait);
+1 -1
fs/dlm/plock.c
··· 471 471 472 472 spin_lock(&ops_lock); 473 473 if (!list_empty(&send_list)) 474 - mask = POLLIN | POLLRDNORM; 474 + mask = EPOLLIN | EPOLLRDNORM; 475 475 spin_unlock(&ops_lock); 476 476 477 477 return mask;
+1 -1
fs/dlm/user.c
··· 896 896 spin_lock(&proc->asts_spin); 897 897 if (!list_empty(&proc->asts)) { 898 898 spin_unlock(&proc->asts_spin); 899 - return POLLIN | POLLRDNORM; 899 + return EPOLLIN | EPOLLRDNORM; 900 900 } 901 901 spin_unlock(&proc->asts_spin); 902 902 return 0;
+1 -1
fs/ecryptfs/miscdev.c
··· 59 59 poll_wait(file, &daemon->wait, pt); 60 60 mutex_lock(&daemon->mux); 61 61 if (!list_empty(&daemon->msg_ctx_out_queue)) 62 - mask |= POLLIN | POLLRDNORM; 62 + mask |= EPOLLIN | EPOLLRDNORM; 63 63 out_unlock_daemon: 64 64 daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL; 65 65 mutex_unlock(&daemon->mux);
+9 -9
fs/eventfd.c
··· 45 45 * 46 46 * This function is supposed to be called by the kernel in paths that do not 47 47 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX 48 - * value, and we signal this as overflow condition by returning a POLLERR 48 + * value, and we signal this as overflow condition by returning a EPOLLERR 49 49 * to poll(2). 50 50 * 51 51 * Returns the amount by which the counter was incremented. This will be less ··· 60 60 n = ULLONG_MAX - ctx->count; 61 61 ctx->count += n; 62 62 if (waitqueue_active(&ctx->wqh)) 63 - wake_up_locked_poll(&ctx->wqh, POLLIN); 63 + wake_up_locked_poll(&ctx->wqh, EPOLLIN); 64 64 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 65 65 66 66 return n; ··· 96 96 { 97 97 struct eventfd_ctx *ctx = file->private_data; 98 98 99 - wake_up_poll(&ctx->wqh, POLLHUP); 99 + wake_up_poll(&ctx->wqh, EPOLLHUP); 100 100 eventfd_ctx_put(ctx); 101 101 return 0; 102 102 } ··· 150 150 count = READ_ONCE(ctx->count); 151 151 152 152 if (count > 0) 153 - events |= POLLIN; 153 + events |= EPOLLIN; 154 154 if (count == ULLONG_MAX) 155 - events |= POLLERR; 155 + events |= EPOLLERR; 156 156 if (ULLONG_MAX - 1 > count) 157 - events |= POLLOUT; 157 + events |= EPOLLOUT; 158 158 159 159 return events; 160 160 } ··· 187 187 eventfd_ctx_do_read(ctx, cnt); 188 188 __remove_wait_queue(&ctx->wqh, wait); 189 189 if (*cnt != 0 && waitqueue_active(&ctx->wqh)) 190 - wake_up_locked_poll(&ctx->wqh, POLLOUT); 190 + wake_up_locked_poll(&ctx->wqh, EPOLLOUT); 191 191 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 192 192 193 193 return *cnt != 0 ? 
0 : -EAGAIN; ··· 231 231 if (likely(res > 0)) { 232 232 eventfd_ctx_do_read(ctx, &ucnt); 233 233 if (waitqueue_active(&ctx->wqh)) 234 - wake_up_locked_poll(&ctx->wqh, POLLOUT); 234 + wake_up_locked_poll(&ctx->wqh, EPOLLOUT); 235 235 } 236 236 spin_unlock_irq(&ctx->wqh.lock); 237 237 ··· 281 281 if (likely(res > 0)) { 282 282 ctx->count += ucnt; 283 283 if (waitqueue_active(&ctx->wqh)) 284 - wake_up_locked_poll(&ctx->wqh, POLLIN); 284 + wake_up_locked_poll(&ctx->wqh, EPOLLIN); 285 285 } 286 286 spin_unlock_irq(&ctx->wqh.lock); 287 287
+11 -11
fs/eventpoll.c
··· 95 95 /* Epoll private bits inside the event mask */ 96 96 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE) 97 97 98 - #define EPOLLINOUT_BITS (POLLIN | POLLOUT) 98 + #define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT) 99 99 100 - #define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \ 100 + #define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \ 101 101 EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE) 102 102 103 103 /* Maximum number of nesting allowed inside epoll sets */ ··· 555 555 wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie; 556 556 557 557 spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1); 558 - wake_up_locked_poll(wqueue, POLLIN); 558 + wake_up_locked_poll(wqueue, EPOLLIN); 559 559 spin_unlock_irqrestore(&wqueue->lock, flags); 560 560 561 561 return 0; ··· 575 575 576 576 static void ep_poll_safewake(wait_queue_head_t *wq) 577 577 { 578 - wake_up_poll(wq, POLLIN); 578 + wake_up_poll(wq, EPOLLIN); 579 579 } 580 580 581 581 #endif ··· 908 908 909 909 list_for_each_entry_safe(epi, tmp, head, rdllink) { 910 910 if (ep_item_poll(epi, &pt, depth)) { 911 - return POLLIN | POLLRDNORM; 911 + return EPOLLIN | EPOLLRDNORM; 912 912 } else { 913 913 /* 914 914 * Item has been dropped into the ready list by the poll ··· 1181 1181 if ((epi->event.events & EPOLLEXCLUSIVE) && 1182 1182 !(pollflags & POLLFREE)) { 1183 1183 switch (pollflags & EPOLLINOUT_BITS) { 1184 - case POLLIN: 1185 - if (epi->event.events & POLLIN) 1184 + case EPOLLIN: 1185 + if (epi->event.events & EPOLLIN) 1186 1186 ewake = 1; 1187 1187 break; 1188 - case POLLOUT: 1189 - if (epi->event.events & POLLOUT) 1188 + case EPOLLOUT: 1189 + if (epi->event.events & EPOLLOUT) 1190 1190 ewake = 1; 1191 1191 break; 1192 1192 case 0: ··· 2105 2105 switch (op) { 2106 2106 case EPOLL_CTL_ADD: 2107 2107 if (!epi) { 2108 - epds.events |= POLLERR | POLLHUP; 2108 + epds.events |= EPOLLERR | EPOLLHUP; 2109 2109 error = ep_insert(ep, &epds, 
tf.file, fd, full_check); 2110 2110 } else 2111 2111 error = -EEXIST; ··· 2121 2121 case EPOLL_CTL_MOD: 2122 2122 if (epi) { 2123 2123 if (!(epi->event.events & EPOLLEXCLUSIVE)) { 2124 - epds.events |= POLLERR | POLLHUP; 2124 + epds.events |= EPOLLERR | EPOLLHUP; 2125 2125 error = ep_modify(ep, epi, &epds); 2126 2126 } 2127 2127 } else
+6 -6
fs/fcntl.c
··· 691 691 /* Table to convert sigio signal codes into poll band bitmaps */ 692 692 693 693 static const __poll_t band_table[NSIGPOLL] = { 694 - POLLIN | POLLRDNORM, /* POLL_IN */ 695 - POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ 696 - POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ 697 - POLLERR, /* POLL_ERR */ 698 - POLLPRI | POLLRDBAND, /* POLL_PRI */ 699 - POLLHUP | POLLERR /* POLL_HUP */ 694 + EPOLLIN | EPOLLRDNORM, /* POLL_IN */ 695 + EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND, /* POLL_OUT */ 696 + EPOLLIN | EPOLLRDNORM | EPOLLMSG, /* POLL_MSG */ 697 + EPOLLERR, /* POLL_ERR */ 698 + EPOLLPRI | EPOLLRDBAND, /* POLL_PRI */ 699 + EPOLLHUP | EPOLLERR /* POLL_HUP */ 700 700 }; 701 701 702 702 static inline int sigio_perm(struct task_struct *p,
+4 -4
fs/fuse/dev.c
··· 2006 2006 2007 2007 static __poll_t fuse_dev_poll(struct file *file, poll_table *wait) 2008 2008 { 2009 - __poll_t mask = POLLOUT | POLLWRNORM; 2009 + __poll_t mask = EPOLLOUT | EPOLLWRNORM; 2010 2010 struct fuse_iqueue *fiq; 2011 2011 struct fuse_dev *fud = fuse_get_dev(file); 2012 2012 2013 2013 if (!fud) 2014 - return POLLERR; 2014 + return EPOLLERR; 2015 2015 2016 2016 fiq = &fud->fc->iq; 2017 2017 poll_wait(file, &fiq->waitq, wait); 2018 2018 2019 2019 spin_lock(&fiq->waitq.lock); 2020 2020 if (!fiq->connected) 2021 - mask = POLLERR; 2021 + mask = EPOLLERR; 2022 2022 else if (request_pending(fiq)) 2023 - mask |= POLLIN | POLLRDNORM; 2023 + mask |= EPOLLIN | EPOLLRDNORM; 2024 2024 spin_unlock(&fiq->waitq.lock); 2025 2025 2026 2026 return mask;
+1 -1
fs/fuse/file.c
··· 2791 2791 fc->no_poll = 1; 2792 2792 return DEFAULT_POLLMASK; 2793 2793 } 2794 - return POLLERR; 2794 + return EPOLLERR; 2795 2795 } 2796 2796 EXPORT_SYMBOL_GPL(fuse_file_poll); 2797 2797
+2 -2
fs/kernfs/file.c
··· 823 823 * the content and then you use 'poll' or 'select' to wait for 824 824 * the content to change. When the content changes (assuming the 825 825 * manager for the kobject supports notification), poll will 826 - * return POLLERR|POLLPRI, and select will return the fd whether 826 + * return EPOLLERR|EPOLLPRI, and select will return the fd whether 827 827 * it is waiting for read, write, or exceptions. 828 828 * Once poll/select indicates that the value has changed, you 829 829 * need to close and re-open the file, or seek to 0 and read again. ··· 851 851 return DEFAULT_POLLMASK; 852 852 853 853 trigger: 854 - return DEFAULT_POLLMASK|POLLERR|POLLPRI; 854 + return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI; 855 855 } 856 856 857 857 static void kernfs_notify_workfn(struct work_struct *work)
+1 -1
fs/notify/fanotify/fanotify_user.c
··· 247 247 poll_wait(file, &group->notification_waitq, wait); 248 248 spin_lock(&group->notification_lock); 249 249 if (!fsnotify_notify_queue_is_empty(group)) 250 - ret = POLLIN | POLLRDNORM; 250 + ret = EPOLLIN | EPOLLRDNORM; 251 251 spin_unlock(&group->notification_lock); 252 252 253 253 return ret;
+1 -1
fs/notify/inotify/inotify_user.c
··· 115 115 poll_wait(file, &group->notification_waitq, wait); 116 116 spin_lock(&group->notification_lock); 117 117 if (!fsnotify_notify_queue_is_empty(group)) 118 - ret = POLLIN | POLLRDNORM; 118 + ret = EPOLLIN | EPOLLRDNORM; 119 119 spin_unlock(&group->notification_lock); 120 120 121 121 return ret;
+3 -3
fs/ocfs2/dlmfs/dlmfs.c
··· 71 71 * Over time, dlmfs has added some features that were not part of the 72 72 * initial ABI. Unfortunately, some of these features are not detectable 73 73 * via standard usage. For example, Linux's default poll always returns 74 - * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs 74 + * EPOLLIN, so there is no way for a caller of poll(2) to know when dlmfs 75 75 * added poll support. Instead, we provide this list of new capabilities. 76 76 * 77 77 * Capabilities is a read-only attribute. We do it as a module parameter ··· 83 83 * interaction. 84 84 * 85 85 * Capabilities: 86 - * - bast : POLLIN against the file descriptor of a held lock 86 + * - bast : EPOLLIN against the file descriptor of a held lock 87 87 * signifies a bast fired on the lock. 88 88 */ 89 89 #define DLMFS_CAPABILITIES "bast stackglue" ··· 230 230 231 231 spin_lock(&ip->ip_lockres.l_lock); 232 232 if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED) 233 - event = POLLIN | POLLRDNORM; 233 + event = EPOLLIN | EPOLLRDNORM; 234 234 spin_unlock(&ip->ip_lockres.l_lock); 235 235 236 236 return event;
+1 -1
fs/orangefs/devorangefs-req.c
··· 823 823 poll_wait(file, &orangefs_request_list_waitq, poll_table); 824 824 825 825 if (!list_empty(&orangefs_request_list)) 826 - poll_revent_mask |= POLLIN; 826 + poll_revent_mask |= EPOLLIN; 827 827 return poll_revent_mask; 828 828 } 829 829
+11 -11
fs/pipe.c
··· 327 327 break; 328 328 } 329 329 if (do_wakeup) { 330 - wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM); 330 + wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM); 331 331 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 332 332 } 333 333 pipe_wait(pipe); ··· 336 336 337 337 /* Signal writers asynchronously that there is more room. */ 338 338 if (do_wakeup) { 339 - wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM); 339 + wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM); 340 340 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 341 341 } 342 342 if (ret > 0) ··· 463 463 break; 464 464 } 465 465 if (do_wakeup) { 466 - wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); 466 + wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM); 467 467 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 468 468 do_wakeup = 0; 469 469 } ··· 474 474 out: 475 475 __pipe_unlock(pipe); 476 476 if (do_wakeup) { 477 - wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); 477 + wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM); 478 478 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 479 479 } 480 480 if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { ··· 523 523 nrbufs = pipe->nrbufs; 524 524 mask = 0; 525 525 if (filp->f_mode & FMODE_READ) { 526 - mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0; 526 + mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0; 527 527 if (!pipe->writers && filp->f_version != pipe->w_counter) 528 - mask |= POLLHUP; 528 + mask |= EPOLLHUP; 529 529 } 530 530 531 531 if (filp->f_mode & FMODE_WRITE) { 532 - mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0; 532 + mask |= (nrbufs < pipe->buffers) ? 
EPOLLOUT | EPOLLWRNORM : 0; 533 533 /* 534 - * Most Unices do not set POLLERR for FIFOs but on Linux they 534 + * Most Unices do not set EPOLLERR for FIFOs but on Linux they 535 535 * behave exactly like pipes for poll(). 536 536 */ 537 537 if (!pipe->readers) 538 - mask |= POLLERR; 538 + mask |= EPOLLERR; 539 539 } 540 540 541 541 return mask; ··· 568 568 pipe->writers--; 569 569 570 570 if (pipe->readers || pipe->writers) { 571 - wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP); 571 + wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP); 572 572 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 573 573 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 574 574 } ··· 936 936 937 937 if (!is_pipe && !pipe->writers) { 938 938 if ((filp->f_flags & O_NONBLOCK)) { 939 - /* suppress POLLHUP until we have 939 + /* suppress EPOLLHUP until we have 940 940 * seen a writer */ 941 941 filp->f_version = pipe->w_counter; 942 942 } else {
+1 -1
fs/proc/kmsg.c
··· 44 44 { 45 45 poll_wait(file, &log_wait, wait); 46 46 if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC)) 47 - return POLLIN | POLLRDNORM; 47 + return EPOLLIN | EPOLLRDNORM; 48 48 return 0; 49 49 } 50 50
+2 -2
fs/proc/proc_sysctl.c
··· 640 640 641 641 /* sysctl was unregistered */ 642 642 if (IS_ERR(head)) 643 - return POLLERR | POLLHUP; 643 + return EPOLLERR | EPOLLHUP; 644 644 645 645 if (!table->proc_handler) 646 646 goto out; ··· 653 653 654 654 if (event != atomic_read(&table->poll->event)) { 655 655 filp->private_data = proc_sys_poll_event(table->poll); 656 - ret = POLLIN | POLLRDNORM | POLLERR | POLLPRI; 656 + ret = EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; 657 657 } 658 658 659 659 out:
+2 -2
fs/proc_namespace.c
··· 23 23 struct seq_file *m = file->private_data; 24 24 struct proc_mounts *p = m->private; 25 25 struct mnt_namespace *ns = p->ns; 26 - __poll_t res = POLLIN | POLLRDNORM; 26 + __poll_t res = EPOLLIN | EPOLLRDNORM; 27 27 int event; 28 28 29 29 poll_wait(file, &p->ns->poll, wait); ··· 31 31 event = READ_ONCE(ns->event); 32 32 if (m->poll_event != event) { 33 33 m->poll_event = event; 34 - res |= POLLERR | POLLPRI; 34 + res |= EPOLLERR | EPOLLPRI; 35 35 } 36 36 37 37 return res;
+5 -5
fs/select.c
··· 432 432 return max; 433 433 } 434 434 435 - #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR) 436 - #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR) 437 - #define POLLEX_SET (POLLPRI) 435 + #define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR) 436 + #define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR) 437 + #define POLLEX_SET (EPOLLPRI) 438 438 439 439 static inline void wait_key_set(poll_table *wait, unsigned long in, 440 440 unsigned long out, unsigned long bit, ··· 814 814 fd = pollfd->fd; 815 815 if (fd >= 0) { 816 816 struct fd f = fdget(fd); 817 - mask = POLLNVAL; 817 + mask = EPOLLNVAL; 818 818 if (f.file) { 819 819 /* userland u16 ->events contains POLL... bitmap */ 820 820 __poll_t filter = demangle_poll(pollfd->events) | 821 - POLLERR | POLLHUP; 821 + EPOLLERR | EPOLLHUP; 822 822 mask = DEFAULT_POLLMASK; 823 823 if (f.file->f_op->poll) { 824 824 pwait->_key = filter;
+2 -2
fs/signalfd.c
··· 45 45 return; 46 46 47 47 /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */ 48 - wake_up_poll(wqh, POLLHUP | POLLFREE); 48 + wake_up_poll(wqh, EPOLLHUP | POLLFREE); 49 49 } 50 50 51 51 struct signalfd_ctx { ··· 69 69 if (next_signal(&current->pending, &ctx->sigmask) || 70 70 next_signal(&current->signal->shared_pending, 71 71 &ctx->sigmask)) 72 - events |= POLLIN; 72 + events |= EPOLLIN; 73 73 spin_unlock_irq(&current->sighand->siglock); 74 74 75 75 return events;
+1 -1
fs/timerfd.c
··· 237 237 238 238 spin_lock_irqsave(&ctx->wqh.lock, flags); 239 239 if (ctx->ticks) 240 - events |= POLLIN; 240 + events |= EPOLLIN; 241 241 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 242 242 243 243 return events;
+8 -8
fs/userfaultfd.c
··· 483 483 if (likely(must_wait && !READ_ONCE(ctx->released) && 484 484 (return_to_userland ? !signal_pending(current) : 485 485 !fatal_signal_pending(current)))) { 486 - wake_up_poll(&ctx->fd_wqh, POLLIN); 486 + wake_up_poll(&ctx->fd_wqh, EPOLLIN); 487 487 schedule(); 488 488 ret |= VM_FAULT_MAJOR; 489 489 ··· 614 614 615 615 spin_unlock(&ctx->event_wqh.lock); 616 616 617 - wake_up_poll(&ctx->fd_wqh, POLLIN); 617 + wake_up_poll(&ctx->fd_wqh, EPOLLIN); 618 618 schedule(); 619 619 620 620 spin_lock(&ctx->event_wqh.lock); ··· 904 904 /* Flush pending events that may still wait on event_wqh */ 905 905 wake_up_all(&ctx->event_wqh); 906 906 907 - wake_up_poll(&ctx->fd_wqh, POLLHUP); 907 + wake_up_poll(&ctx->fd_wqh, EPOLLHUP); 908 908 userfaultfd_ctx_put(ctx); 909 909 return 0; 910 910 } ··· 949 949 950 950 switch (ctx->state) { 951 951 case UFFD_STATE_WAIT_API: 952 - return POLLERR; 952 + return EPOLLERR; 953 953 case UFFD_STATE_RUNNING: 954 954 /* 955 955 * poll() never guarantees that read won't block. 956 956 * userfaults can be waken before they're read(). 957 957 */ 958 958 if (unlikely(!(file->f_flags & O_NONBLOCK))) 959 - return POLLERR; 959 + return EPOLLERR; 960 960 /* 961 961 * lockless access to see if there are pending faults 962 962 * __pollwait last action is the add_wait_queue but ··· 970 970 ret = 0; 971 971 smp_mb(); 972 972 if (waitqueue_active(&ctx->fault_pending_wqh)) 973 - ret = POLLIN; 973 + ret = EPOLLIN; 974 974 else if (waitqueue_active(&ctx->event_wqh)) 975 - ret = POLLIN; 975 + ret = EPOLLIN; 976 976 977 977 return ret; 978 978 default: 979 979 WARN_ON_ONCE(1); 980 - return POLLERR; 980 + return EPOLLERR; 981 981 } 982 982 } 983 983
+8 -8
include/linux/scif.h
··· 1266 1266 * events is a bitmask specifying the events which the application is 1267 1267 * interested in. The field revents is an output parameter, filled by the 1268 1268 * kernel with the events that actually occurred. The bits returned in revents 1269 - * can include any of those specified in events, or one of the values POLLERR, 1270 - * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events 1269 + * can include any of those specified in events, or one of the values EPOLLERR, 1270 + * EPOLLHUP, or EPOLLNVAL. (These three bits are meaningless in the events 1271 1271 * field, and will be set in the revents field whenever the corresponding 1272 1272 * condition is true.) 1273 1273 * ··· 1279 1279 * timeout means an infinite timeout. 1280 1280 * 1281 1281 * The following bits may be set in events and returned in revents. 1282 - * POLLIN - Data may be received without blocking. For a connected 1282 + * EPOLLIN - Data may be received without blocking. For a connected 1283 1283 * endpoint, this means that scif_recv() may be called without blocking. For a 1284 1284 * listening endpoint, this means that scif_accept() may be called without 1285 1285 * blocking. 1286 - * POLLOUT - Data may be sent without blocking. For a connected endpoint, this 1287 - * means that scif_send() may be called without blocking. POLLOUT may also be 1286 + * EPOLLOUT - Data may be sent without blocking. For a connected endpoint, this 1287 + * means that scif_send() may be called without blocking. EPOLLOUT may also be 1288 1288 * used to block waiting for a non-blocking connect to complete. This bit value 1289 1289 * has no meaning for a listening endpoint and is ignored if specified. 1290 1290 * 1291 1291 * The following bits are only returned in revents, and are ignored if set in 1292 1292 * events. 
1293 - * POLLERR - An error occurred on the endpoint 1294 - * POLLHUP - The connection to the peer endpoint was disconnected 1295 - * POLLNVAL - The specified endpoint descriptor is invalid. 1293 + * EPOLLERR - An error occurred on the endpoint 1294 + * EPOLLHUP - The connection to the peer endpoint was disconnected 1295 + * EPOLLNVAL - The specified endpoint descriptor is invalid. 1296 1296 * 1297 1297 * Return: 1298 1298 * Upon successful completion, scif_poll() returns a non-negative value. A
+3 -3
include/media/videobuf2-core.h
··· 443 443 * @fileio_read_once: report EOF after reading the first buffer 444 444 * @fileio_write_immediately: queue buffer after each write() call 445 445 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver 446 - * @quirk_poll_must_check_waiting_for_buffers: Return %POLLERR at poll when QBUF 446 + * @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF 447 447 * has not been called. This is a vb1 idiom that has been adopted 448 448 * also by vb2. 449 449 * @lock: pointer to a mutex that protects the &struct vb2_queue. The ··· 493 493 * @error: a fatal error occurred on the queue 494 494 * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for 495 495 * buffers. Only set for capture queues if qbuf has not yet been 496 - * called since poll() needs to return %POLLERR in that situation. 496 + * called since poll() needs to return %EPOLLERR in that situation. 497 497 * @is_multiplanar: set if buffer type is multiplanar 498 498 * @is_output: set if buffer type is output 499 499 * @copy_timestamp: set if vb2-core should set timestamps ··· 869 869 * @q: pointer to &struct vb2_queue with videobuf2 queue. 870 870 * 871 871 * Flag that a fatal unrecoverable error has occurred and wake up all processes 872 - * waiting on the queue. Polling will now set %POLLERR and queuing and dequeuing 872 + * waiting on the queue. Polling will now set %EPOLLERR and queuing and dequeuing 873 873 * buffers will return %-EIO. 874 874 * 875 875 * The error flag will be cleared when canceling the queue, either from
+1 -1
include/net/inet_connection_sock.h
··· 310 310 static inline __poll_t inet_csk_listen_poll(const struct sock *sk) 311 311 { 312 312 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? 313 - (POLLIN | POLLRDNORM) : 0; 313 + (EPOLLIN | EPOLLRDNORM) : 0; 314 314 } 315 315 316 316 int inet_csk_listen_start(struct sock *sk, int backlog);
+2 -2
ipc/mqueue.c
··· 578 578 579 579 spin_lock(&info->lock); 580 580 if (info->attr.mq_curmsgs) 581 - retval = POLLIN | POLLRDNORM; 581 + retval = EPOLLIN | EPOLLRDNORM; 582 582 583 583 if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) 584 - retval |= POLLOUT | POLLWRNORM; 584 + retval |= EPOLLOUT | EPOLLWRNORM; 585 585 spin_unlock(&info->lock); 586 586 587 587 return retval;
+1 -1
kernel/events/core.c
··· 4524 4524 { 4525 4525 struct perf_event *event = file->private_data; 4526 4526 struct ring_buffer *rb; 4527 - __poll_t events = POLLHUP; 4527 + __poll_t events = EPOLLHUP; 4528 4528 4529 4529 poll_wait(file, &event->waitq, wait); 4530 4530
+1 -1
kernel/events/ring_buffer.c
··· 19 19 20 20 static void perf_output_wakeup(struct perf_output_handle *handle) 21 21 { 22 - atomic_set(&handle->rb->poll, POLLIN); 22 + atomic_set(&handle->rb->poll, EPOLLIN); 23 23 24 24 handle->event->pending_wakeup = 1; 25 25 irq_work_queue(&handle->event->pending);
+3 -3
kernel/printk/printk.c
··· 930 930 __poll_t ret = 0; 931 931 932 932 if (!user) 933 - return POLLERR|POLLNVAL; 933 + return EPOLLERR|EPOLLNVAL; 934 934 935 935 poll_wait(file, &log_wait, wait); 936 936 ··· 938 938 if (user->seq < log_next_seq) { 939 939 /* return error when data has vanished underneath us */ 940 940 if (user->seq < log_first_seq) 941 - ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI; 941 + ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 942 942 else 943 - ret = POLLIN|POLLRDNORM; 943 + ret = EPOLLIN|EPOLLRDNORM; 944 944 } 945 945 logbuf_unlock_irq(); 946 946
+2 -2
kernel/relay.c
··· 924 924 struct rchan_buf *buf = filp->private_data; 925 925 926 926 if (buf->finalized) 927 - return POLLERR; 927 + return EPOLLERR; 928 928 929 929 if (filp->f_mode & FMODE_READ) { 930 930 poll_wait(filp, &buf->read_wait, wait); 931 931 if (!relay_buf_empty(buf)) 932 - mask |= POLLIN | POLLRDNORM; 932 + mask |= EPOLLIN | EPOLLRDNORM; 933 933 } 934 934 935 935 return mask;
+1 -1
kernel/time/posix-clock.c
··· 74 74 __poll_t result = 0; 75 75 76 76 if (!clk) 77 - return POLLERR; 77 + return EPOLLERR; 78 78 79 79 if (clk->ops.poll) 80 80 result = clk->ops.poll(clk, fp, wait);
+2 -2
kernel/trace/ring_buffer.c
··· 627 627 * as data is added to any of the @buffer's cpu buffers. Otherwise 628 628 * it will wait for data to be added to a specific cpu buffer. 629 629 * 630 - * Returns POLLIN | POLLRDNORM if data exists in the buffers, 630 + * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers, 631 631 * zero otherwise. 632 632 */ 633 633 __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, ··· 665 665 666 666 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || 667 667 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) 668 - return POLLIN | POLLRDNORM; 668 + return EPOLLIN | EPOLLRDNORM; 669 669 return 0; 670 670 } 671 671
+2 -2
kernel/trace/trace.c
··· 5623 5623 5624 5624 /* Iterators are static, they should be filled or empty */ 5625 5625 if (trace_buffer_iter(iter, iter->cpu_file)) 5626 - return POLLIN | POLLRDNORM; 5626 + return EPOLLIN | EPOLLRDNORM; 5627 5627 5628 5628 if (tr->trace_flags & TRACE_ITER_BLOCK) 5629 5629 /* 5630 5630 * Always select as readable when in blocking mode 5631 5631 */ 5632 - return POLLIN | POLLRDNORM; 5632 + return EPOLLIN | EPOLLRDNORM; 5633 5633 else 5634 5634 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, 5635 5635 filp, poll_table);
+2 -2
mm/memcontrol.c
··· 3688 3688 } 3689 3689 3690 3690 /* 3691 - * Gets called on POLLHUP on eventfd when user closes it. 3691 + * Gets called on EPOLLHUP on eventfd when user closes it. 3692 3692 * 3693 3693 * Called with wqh->lock held and interrupts disabled. 3694 3694 */ ··· 3700 3700 struct mem_cgroup *memcg = event->memcg; 3701 3701 __poll_t flags = key_to_poll(key); 3702 3702 3703 - if (flags & POLLHUP) { 3703 + if (flags & EPOLLHUP) { 3704 3704 /* 3705 3705 * If the event has been detached at cgroup removal, we 3706 3706 * can simply return knowing the other side will cleanup
+2 -2
mm/swapfile.c
··· 2705 2705 2706 2706 if (seq->poll_event != atomic_read(&proc_poll_event)) { 2707 2707 seq->poll_event = atomic_read(&proc_poll_event); 2708 - return POLLIN | POLLRDNORM | POLLERR | POLLPRI; 2708 + return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; 2709 2709 } 2710 2710 2711 - return POLLIN | POLLRDNORM; 2711 + return EPOLLIN | EPOLLRDNORM; 2712 2712 } 2713 2713 2714 2714 /* iterator */
+13 -13
net/9p/trans_fd.c
··· 240 240 if (!ts) { 241 241 if (err) 242 242 *err = -EREMOTEIO; 243 - return POLLERR; 243 + return EPOLLERR; 244 244 } 245 245 246 246 if (!ts->rd->f_op->poll) ··· 253 253 n = DEFAULT_POLLMASK; 254 254 else 255 255 n = ts->wr->f_op->poll(ts->wr, pt); 256 - ret = (ret & ~POLLOUT) | (n & ~POLLIN); 256 + ret = (ret & ~EPOLLOUT) | (n & ~EPOLLIN); 257 257 } 258 258 259 259 return ret; ··· 396 396 397 397 if (!list_empty(&m->req_list)) { 398 398 if (test_and_clear_bit(Rpending, &m->wsched)) 399 - n = POLLIN; 399 + n = EPOLLIN; 400 400 else 401 401 n = p9_fd_poll(m->client, NULL, NULL); 402 402 403 - if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { 403 + if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { 404 404 p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); 405 405 schedule_work(&m->rq); 406 406 } ··· 505 505 506 506 if (m->wsize || !list_empty(&m->unsent_req_list)) { 507 507 if (test_and_clear_bit(Wpending, &m->wsched)) 508 - n = POLLOUT; 508 + n = EPOLLOUT; 509 509 else 510 510 n = p9_fd_poll(m->client, NULL, NULL); 511 511 512 - if ((n & POLLOUT) && 512 + if ((n & EPOLLOUT) && 513 513 !test_and_set_bit(Wworksched, &m->wsched)) { 514 514 p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); 515 515 schedule_work(&m->wq); ··· 599 599 init_poll_funcptr(&m->pt, p9_pollwait); 600 600 601 601 n = p9_fd_poll(client, &m->pt, NULL); 602 - if (n & POLLIN) { 602 + if (n & EPOLLIN) { 603 603 p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); 604 604 set_bit(Rpending, &m->wsched); 605 605 } 606 606 607 - if (n & POLLOUT) { 607 + if (n & EPOLLOUT) { 608 608 p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); 609 609 set_bit(Wpending, &m->wsched); 610 610 } ··· 625 625 return; 626 626 627 627 n = p9_fd_poll(m->client, NULL, &err); 628 - if (n & (POLLERR | POLLHUP | POLLNVAL)) { 628 + if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) { 629 629 p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n); 630 630 p9_conn_cancel(m, err); 631 631 } 632 632 633 
- if (n & POLLIN) { 633 + if (n & EPOLLIN) { 634 634 set_bit(Rpending, &m->wsched); 635 635 p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); 636 636 if (!test_and_set_bit(Rworksched, &m->wsched)) { ··· 639 639 } 640 640 } 641 641 642 - if (n & POLLOUT) { 642 + if (n & EPOLLOUT) { 643 643 set_bit(Wpending, &m->wsched); 644 644 p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); 645 645 if ((m->wsize || !list_empty(&m->unsent_req_list)) && ··· 678 678 spin_unlock(&client->lock); 679 679 680 680 if (test_and_clear_bit(Wpending, &m->wsched)) 681 - n = POLLOUT; 681 + n = EPOLLOUT; 682 682 else 683 683 n = p9_fd_poll(m->client, NULL, NULL); 684 684 685 - if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) 685 + if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) 686 686 schedule_work(&m->wq); 687 687 688 688 return 0;
+4 -4
net/atm/common.c
··· 661 661 662 662 /* exceptional events */ 663 663 if (sk->sk_err) 664 - mask = POLLERR; 664 + mask = EPOLLERR; 665 665 666 666 if (test_bit(ATM_VF_RELEASED, &vcc->flags) || 667 667 test_bit(ATM_VF_CLOSE, &vcc->flags)) 668 - mask |= POLLHUP; 668 + mask |= EPOLLHUP; 669 669 670 670 /* readable? */ 671 671 if (!skb_queue_empty(&sk->sk_receive_queue)) 672 - mask |= POLLIN | POLLRDNORM; 672 + mask |= EPOLLIN | EPOLLRDNORM; 673 673 674 674 /* writable? */ 675 675 if (sock->state == SS_CONNECTING && ··· 678 678 679 679 if (vcc->qos.txtp.traffic_class != ATM_NONE && 680 680 vcc_writable(sk)) 681 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 681 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 682 682 683 683 return mask; 684 684 }
+1 -1
net/batman-adv/icmp_socket.c
··· 304 304 poll_wait(file, &socket_client->queue_wait, wait); 305 305 306 306 if (socket_client->queue_len > 0) 307 - return POLLIN | POLLRDNORM; 307 + return EPOLLIN | EPOLLRDNORM; 308 308 309 309 return 0; 310 310 }
+1 -1
net/batman-adv/log.c
··· 193 193 poll_wait(file, &debug_log->queue_wait, wait); 194 194 195 195 if (!batadv_log_empty(debug_log)) 196 - return POLLIN | POLLRDNORM; 196 + return EPOLLIN | EPOLLRDNORM; 197 197 198 198 return 0; 199 199 }
+8 -8
net/bluetooth/af_bluetooth.c
··· 431 431 if (sk->sk_state == BT_CONNECTED || 432 432 (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) && 433 433 sk->sk_state == BT_CONNECT2)) 434 - return POLLIN | POLLRDNORM; 434 + return EPOLLIN | EPOLLRDNORM; 435 435 } 436 436 437 437 return 0; ··· 451 451 return bt_accept_poll(sk); 452 452 453 453 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 454 - mask |= POLLERR | 455 - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 454 + mask |= EPOLLERR | 455 + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); 456 456 457 457 if (sk->sk_shutdown & RCV_SHUTDOWN) 458 - mask |= POLLRDHUP | POLLIN | POLLRDNORM; 458 + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 459 459 460 460 if (sk->sk_shutdown == SHUTDOWN_MASK) 461 - mask |= POLLHUP; 461 + mask |= EPOLLHUP; 462 462 463 463 if (!skb_queue_empty(&sk->sk_receive_queue)) 464 - mask |= POLLIN | POLLRDNORM; 464 + mask |= EPOLLIN | EPOLLRDNORM; 465 465 466 466 if (sk->sk_state == BT_CLOSED) 467 - mask |= POLLHUP; 467 + mask |= EPOLLHUP; 468 468 469 469 if (sk->sk_state == BT_CONNECT || 470 470 sk->sk_state == BT_CONNECT2 || ··· 472 472 return mask; 473 473 474 474 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) 475 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 475 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 476 476 else 477 477 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 478 478
+6 -6
net/caif/caif_socket.c
··· 924 924 925 925 caif_disconnect_client(sock_net(sk), &cf_sk->layer); 926 926 cf_sk->sk.sk_socket->state = SS_DISCONNECTING; 927 - wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP); 927 + wake_up_interruptible_poll(sk_sleep(sk), EPOLLERR|EPOLLHUP); 928 928 929 929 sock_orphan(sk); 930 930 sk_stream_kill_queues(&cf_sk->sk); ··· 946 946 947 947 /* exceptional events? */ 948 948 if (sk->sk_err) 949 - mask |= POLLERR; 949 + mask |= EPOLLERR; 950 950 if (sk->sk_shutdown == SHUTDOWN_MASK) 951 - mask |= POLLHUP; 951 + mask |= EPOLLHUP; 952 952 if (sk->sk_shutdown & RCV_SHUTDOWN) 953 - mask |= POLLRDHUP; 953 + mask |= EPOLLRDHUP; 954 954 955 955 /* readable? */ 956 956 if (!skb_queue_empty(&sk->sk_receive_queue) || 957 957 (sk->sk_shutdown & RCV_SHUTDOWN)) 958 - mask |= POLLIN | POLLRDNORM; 958 + mask |= EPOLLIN | EPOLLRDNORM; 959 959 960 960 /* 961 961 * we set writable also when the other side has shut down the 962 962 * connection. This prevents stuck sockets. 963 963 */ 964 964 if (sock_writeable(sk) && tx_flow_is_on(cf_sk)) 965 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 965 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 966 966 967 967 return mask; 968 968 }
+8 -8
net/core/datagram.c
··· 75 75 /* 76 76 * Avoid a wakeup if event not interesting for us 77 77 */ 78 - if (key && !(key_to_poll(key) & (POLLIN | POLLERR))) 78 + if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR))) 79 79 return 0; 80 80 return autoremove_wake_function(wait, mode, sync, key); 81 81 } ··· 842 842 843 843 /* exceptional events? */ 844 844 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 845 - mask |= POLLERR | 846 - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 845 + mask |= EPOLLERR | 846 + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); 847 847 848 848 if (sk->sk_shutdown & RCV_SHUTDOWN) 849 - mask |= POLLRDHUP | POLLIN | POLLRDNORM; 849 + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 850 850 if (sk->sk_shutdown == SHUTDOWN_MASK) 851 - mask |= POLLHUP; 851 + mask |= EPOLLHUP; 852 852 853 853 /* readable? */ 854 854 if (!skb_queue_empty(&sk->sk_receive_queue)) 855 - mask |= POLLIN | POLLRDNORM; 855 + mask |= EPOLLIN | EPOLLRDNORM; 856 856 857 857 /* Connection-based need to check for termination and startup */ 858 858 if (connection_based(sk)) { 859 859 if (sk->sk_state == TCP_CLOSE) 860 - mask |= POLLHUP; 860 + mask |= EPOLLHUP; 861 861 /* connection hasn't started yet? */ 862 862 if (sk->sk_state == TCP_SYN_SENT) 863 863 return mask; ··· 865 865 866 866 /* writable? */ 867 867 if (sock_writeable(sk)) 868 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 868 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 869 869 else 870 870 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 871 871
+5 -5
net/core/sock.c
··· 2619 2619 rcu_read_lock(); 2620 2620 wq = rcu_dereference(sk->sk_wq); 2621 2621 if (skwq_has_sleeper(wq)) 2622 - wake_up_interruptible_poll(&wq->wait, POLLERR); 2622 + wake_up_interruptible_poll(&wq->wait, EPOLLERR); 2623 2623 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); 2624 2624 rcu_read_unlock(); 2625 2625 } ··· 2631 2631 rcu_read_lock(); 2632 2632 wq = rcu_dereference(sk->sk_wq); 2633 2633 if (skwq_has_sleeper(wq)) 2634 - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | 2635 - POLLRDNORM | POLLRDBAND); 2634 + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | 2635 + EPOLLRDNORM | EPOLLRDBAND); 2636 2636 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 2637 2637 rcu_read_unlock(); 2638 2638 } ··· 2649 2649 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { 2650 2650 wq = rcu_dereference(sk->sk_wq); 2651 2651 if (skwq_has_sleeper(wq)) 2652 - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 2653 - POLLWRNORM | POLLWRBAND); 2652 + wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 2653 + EPOLLWRNORM | EPOLLWRBAND); 2654 2654 2655 2655 /* Should agree with poll, otherwise some programs break */ 2656 2656 if (sock_writeable(sk))
+2 -2
net/core/stream.c
··· 38 38 rcu_read_lock(); 39 39 wq = rcu_dereference(sk->sk_wq); 40 40 if (skwq_has_sleeper(wq)) 41 - wake_up_interruptible_poll(&wq->wait, POLLOUT | 42 - POLLWRNORM | POLLWRBAND); 41 + wake_up_interruptible_poll(&wq->wait, EPOLLOUT | 42 + EPOLLWRNORM | EPOLLWRBAND); 43 43 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) 44 44 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); 45 45 rcu_read_unlock();
+6 -6
net/dccp/proto.c
··· 338 338 339 339 mask = 0; 340 340 if (sk->sk_err) 341 - mask = POLLERR; 341 + mask = EPOLLERR; 342 342 343 343 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) 344 - mask |= POLLHUP; 344 + mask |= EPOLLHUP; 345 345 if (sk->sk_shutdown & RCV_SHUTDOWN) 346 - mask |= POLLIN | POLLRDNORM | POLLRDHUP; 346 + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 347 347 348 348 /* Connected? */ 349 349 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { 350 350 if (atomic_read(&sk->sk_rmem_alloc) > 0) 351 - mask |= POLLIN | POLLRDNORM; 351 + mask |= EPOLLIN | EPOLLRDNORM; 352 352 353 353 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 354 354 if (sk_stream_is_writeable(sk)) { 355 - mask |= POLLOUT | POLLWRNORM; 355 + mask |= EPOLLOUT | EPOLLWRNORM; 356 356 } else { /* send SIGIO later */ 357 357 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 358 358 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); ··· 362 362 * IO signal will be lost. 363 363 */ 364 364 if (sk_stream_is_writeable(sk)) 365 - mask |= POLLOUT | POLLWRNORM; 365 + mask |= EPOLLOUT | EPOLLWRNORM; 366 366 } 367 367 } 368 368 }
+1 -1
net/decnet/af_decnet.c
··· 1216 1216 __poll_t mask = datagram_poll(file, sock, wait); 1217 1217 1218 1218 if (!skb_queue_empty(&scp->other_receive_queue)) 1219 - mask |= POLLRDBAND; 1219 + mask |= EPOLLRDBAND; 1220 1220 1221 1221 return mask; 1222 1222 }
+1 -1
net/ipv4/af_inet.c
··· 828 828 case TCP_CLOSE: 829 829 err = -ENOTCONN; 830 830 /* Hack to wake up other listeners, who can poll for 831 - POLLHUP, even on eg. unconnected UDP sockets -- RR */ 831 + EPOLLHUP, even on eg. unconnected UDP sockets -- RR */ 832 832 /* fall through */ 833 833 default: 834 834 sk->sk_shutdown |= how;
+17 -17
net/ipv4/tcp.c
··· 512 512 mask = 0; 513 513 514 514 /* 515 - * POLLHUP is certainly not done right. But poll() doesn't 515 + * EPOLLHUP is certainly not done right. But poll() doesn't 516 516 * have a notion of HUP in just one direction, and for a 517 517 * socket the read side is more interesting. 518 518 * 519 - * Some poll() documentation says that POLLHUP is incompatible 520 - * with the POLLOUT/POLLWR flags, so somebody should check this 519 + * Some poll() documentation says that EPOLLHUP is incompatible 520 + * with the EPOLLOUT/POLLWR flags, so somebody should check this 521 521 * all. But careful, it tends to be safer to return too many 522 522 * bits than too few, and you can easily break real applications 523 523 * if you don't tell them that something has hung up! 524 524 * 525 525 * Check-me. 526 526 * 527 - * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and 527 + * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and 528 528 * our fs/select.c). It means that after we received EOF, 529 529 * poll always returns immediately, making impossible poll() on write() 530 - * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP 530 + * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP 531 531 * if and only if shutdown has been made in both directions. 532 532 * Actually, it is interesting to look how Solaris and DUX 533 - * solve this dilemma. I would prefer, if POLLHUP were maskable, 533 + * solve this dilemma. I would prefer, if EPOLLHUP were maskable, 534 534 * then we could set it on SND_SHUTDOWN. BTW examples given 535 535 * in Stevens' books assume exactly this behaviour, it explains 536 - * why POLLHUP is incompatible with POLLOUT. --ANK 536 + * why EPOLLHUP is incompatible with EPOLLOUT. --ANK 537 537 * 538 538 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 539 539 * blocking on fresh not-connected or disconnected socket. 
--ANK 540 540 */ 541 541 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) 542 - mask |= POLLHUP; 542 + mask |= EPOLLHUP; 543 543 if (sk->sk_shutdown & RCV_SHUTDOWN) 544 - mask |= POLLIN | POLLRDNORM | POLLRDHUP; 544 + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 545 545 546 546 /* Connected or passive Fast Open socket? */ 547 547 if (state != TCP_SYN_SENT && ··· 554 554 target++; 555 555 556 556 if (tp->rcv_nxt - tp->copied_seq >= target) 557 - mask |= POLLIN | POLLRDNORM; 557 + mask |= EPOLLIN | EPOLLRDNORM; 558 558 559 559 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 560 560 if (sk_stream_is_writeable(sk)) { 561 - mask |= POLLOUT | POLLWRNORM; 561 + mask |= EPOLLOUT | EPOLLWRNORM; 562 562 } else { /* send SIGIO later */ 563 563 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 564 564 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); ··· 570 570 */ 571 571 smp_mb__after_atomic(); 572 572 if (sk_stream_is_writeable(sk)) 573 - mask |= POLLOUT | POLLWRNORM; 573 + mask |= EPOLLOUT | EPOLLWRNORM; 574 574 } 575 575 } else 576 - mask |= POLLOUT | POLLWRNORM; 576 + mask |= EPOLLOUT | EPOLLWRNORM; 577 577 578 578 if (tp->urg_data & TCP_URG_VALID) 579 - mask |= POLLPRI; 579 + mask |= EPOLLPRI; 580 580 } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { 581 581 /* Active TCP fastopen socket with defer_connect 582 - * Return POLLOUT so application can call write() 582 + * Return EPOLLOUT so application can call write() 583 583 * in order for kernel to generate SYN+data 584 584 */ 585 - mask |= POLLOUT | POLLWRNORM; 585 + mask |= EPOLLOUT | EPOLLWRNORM; 586 586 } 587 587 /* This barrier is coupled with smp_wmb() in tcp_reset() */ 588 588 smp_rmb(); 589 589 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 590 - mask |= POLLERR; 590 + mask |= EPOLLERR; 591 591 592 592 return mask; 593 593 }
+1 -1
net/ipv4/tcp_input.c
··· 315 315 316 316 /* Fast Recovery (RFC 5681 3.2) : 317 317 * Cubic needs 1.7 factor, rounded to 2 to include 318 - * extra cushion (application might react slowly to POLLOUT) 318 + * extra cushion (application might react slowly to EPOLLOUT) 319 319 */ 320 320 sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2; 321 321 sndmem *= nr_segs * per_mss;
+3 -3
net/ipv4/udp.c
··· 2501 2501 struct sock *sk = sock->sk; 2502 2502 2503 2503 if (!skb_queue_empty(&udp_sk(sk)->reader_queue)) 2504 - mask |= POLLIN | POLLRDNORM; 2504 + mask |= EPOLLIN | EPOLLRDNORM; 2505 2505 2506 2506 /* Check for false positives due to checksum errors */ 2507 - if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && 2507 + if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) && 2508 2508 !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) 2509 - mask &= ~(POLLIN | POLLRDNORM); 2509 + mask &= ~(EPOLLIN | EPOLLRDNORM); 2510 2510 2511 2511 return mask; 2512 2512
+9 -9
net/iucv/af_iucv.c
··· 1483 1483 sk = (struct sock *) isk; 1484 1484 1485 1485 if (sk->sk_state == IUCV_CONNECTED) 1486 - return POLLIN | POLLRDNORM; 1486 + return EPOLLIN | EPOLLRDNORM; 1487 1487 } 1488 1488 1489 1489 return 0; ··· 1501 1501 return iucv_accept_poll(sk); 1502 1502 1503 1503 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 1504 - mask |= POLLERR | 1505 - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 1504 + mask |= EPOLLERR | 1505 + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); 1506 1506 1507 1507 if (sk->sk_shutdown & RCV_SHUTDOWN) 1508 - mask |= POLLRDHUP; 1508 + mask |= EPOLLRDHUP; 1509 1509 1510 1510 if (sk->sk_shutdown == SHUTDOWN_MASK) 1511 - mask |= POLLHUP; 1511 + mask |= EPOLLHUP; 1512 1512 1513 1513 if (!skb_queue_empty(&sk->sk_receive_queue) || 1514 1514 (sk->sk_shutdown & RCV_SHUTDOWN)) 1515 - mask |= POLLIN | POLLRDNORM; 1515 + mask |= EPOLLIN | EPOLLRDNORM; 1516 1516 1517 1517 if (sk->sk_state == IUCV_CLOSED) 1518 - mask |= POLLHUP; 1518 + mask |= EPOLLHUP; 1519 1519 1520 1520 if (sk->sk_state == IUCV_DISCONN) 1521 - mask |= POLLIN; 1521 + mask |= EPOLLIN; 1522 1522 1523 1523 if (sock_writeable(sk) && iucv_below_msglim(sk)) 1524 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 1524 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 1525 1525 else 1526 1526 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1527 1527
+3 -3
net/kcm/kcmsock.c
··· 396 396 397 397 static void psock_state_change(struct sock *sk) 398 398 { 399 - /* TCP only does a POLLIN for a half close. Do a POLLHUP here 400 - * since application will normally not poll with POLLIN 399 + /* TCP only does a EPOLLIN for a half close. Do a EPOLLHUP here 400 + * since application will normally not poll with EPOLLIN 401 401 * on the TCP sockets. 402 402 */ 403 403 ··· 1338 1338 1339 1339 /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so 1340 1340 * we set sk_state, otherwise epoll_wait always returns right away with 1341 - * POLLHUP 1341 + * EPOLLHUP 1342 1342 */ 1343 1343 kcm->sk.sk_state = TCP_ESTABLISHED; 1344 1344
+8 -8
net/nfc/llcp_sock.c
··· 543 543 sk = &llcp_sock->sk; 544 544 545 545 if (sk->sk_state == LLCP_CONNECTED) 546 - return POLLIN | POLLRDNORM; 546 + return EPOLLIN | EPOLLRDNORM; 547 547 } 548 548 549 549 return 0; ··· 563 563 return llcp_accept_poll(sk); 564 564 565 565 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 566 - mask |= POLLERR | 567 - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 566 + mask |= EPOLLERR | 567 + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); 568 568 569 569 if (!skb_queue_empty(&sk->sk_receive_queue)) 570 - mask |= POLLIN | POLLRDNORM; 570 + mask |= EPOLLIN | EPOLLRDNORM; 571 571 572 572 if (sk->sk_state == LLCP_CLOSED) 573 - mask |= POLLHUP; 573 + mask |= EPOLLHUP; 574 574 575 575 if (sk->sk_shutdown & RCV_SHUTDOWN) 576 - mask |= POLLRDHUP | POLLIN | POLLRDNORM; 576 + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 577 577 578 578 if (sk->sk_shutdown == SHUTDOWN_MASK) 579 - mask |= POLLHUP; 579 + mask |= EPOLLHUP; 580 580 581 581 if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) 582 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 582 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 583 583 else 584 584 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 585 585
+2 -2
net/packet/af_packet.c
··· 4085 4085 if (po->rx_ring.pg_vec) { 4086 4086 if (!packet_previous_rx_frame(po, &po->rx_ring, 4087 4087 TP_STATUS_KERNEL)) 4088 - mask |= POLLIN | POLLRDNORM; 4088 + mask |= EPOLLIN | EPOLLRDNORM; 4089 4089 } 4090 4090 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) 4091 4091 po->pressure = 0; ··· 4093 4093 spin_lock_bh(&sk->sk_write_queue.lock); 4094 4094 if (po->tx_ring.pg_vec) { 4095 4095 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) 4096 - mask |= POLLOUT | POLLWRNORM; 4096 + mask |= EPOLLOUT | EPOLLWRNORM; 4097 4097 } 4098 4098 spin_unlock_bh(&sk->sk_write_queue.lock); 4099 4099 return mask;
+5 -5
net/phonet/socket.c
··· 351 351 poll_wait(file, sk_sleep(sk), wait); 352 352 353 353 if (sk->sk_state == TCP_CLOSE) 354 - return POLLERR; 354 + return EPOLLERR; 355 355 if (!skb_queue_empty(&sk->sk_receive_queue)) 356 - mask |= POLLIN | POLLRDNORM; 356 + mask |= EPOLLIN | EPOLLRDNORM; 357 357 if (!skb_queue_empty(&pn->ctrlreq_queue)) 358 - mask |= POLLPRI; 358 + mask |= EPOLLPRI; 359 359 if (!mask && sk->sk_state == TCP_CLOSE_WAIT) 360 - return POLLHUP; 360 + return EPOLLHUP; 361 361 362 362 if (sk->sk_state == TCP_ESTABLISHED && 363 363 refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf && 364 364 atomic_read(&pn->tx_credits)) 365 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 365 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 366 366 367 367 return mask; 368 368 }
+8 -8
net/rds/af_rds.c
··· 137 137 138 138 /* 139 139 * RDS' poll is without a doubt the least intuitive part of the interface, 140 - * as POLLIN and POLLOUT do not behave entirely as you would expect from 140 + * as EPOLLIN and EPOLLOUT do not behave entirely as you would expect from 141 141 * a network protocol. 142 142 * 143 - * POLLIN is asserted if 143 + * EPOLLIN is asserted if 144 144 * - there is data on the receive queue. 145 145 * - to signal that a previously congested destination may have become 146 146 * uncongested 147 147 * - A notification has been queued to the socket (this can be a congestion 148 148 * update, or a RDMA completion). 149 149 * 150 - * POLLOUT is asserted if there is room on the send queue. This does not mean 150 + * EPOLLOUT is asserted if there is room on the send queue. This does not mean 151 151 * however, that the next sendmsg() call will succeed. If the application tries 152 152 * to send to a congested destination, the system call may still fail (and 153 153 * return ENOBUFS). ··· 167 167 168 168 read_lock_irqsave(&rs->rs_recv_lock, flags); 169 169 if (!rs->rs_cong_monitor) { 170 - /* When a congestion map was updated, we signal POLLIN for 170 + /* When a congestion map was updated, we signal EPOLLIN for 171 171 * "historical" reasons. Applications can also poll for 172 172 * WRBAND instead. 
*/ 173 173 if (rds_cong_updated_since(&rs->rs_cong_track)) 174 - mask |= (POLLIN | POLLRDNORM | POLLWRBAND); 174 + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND); 175 175 } else { 176 176 spin_lock(&rs->rs_lock); 177 177 if (rs->rs_cong_notify) 178 - mask |= (POLLIN | POLLRDNORM); 178 + mask |= (EPOLLIN | EPOLLRDNORM); 179 179 spin_unlock(&rs->rs_lock); 180 180 } 181 181 if (!list_empty(&rs->rs_recv_queue) || 182 182 !list_empty(&rs->rs_notify_queue)) 183 - mask |= (POLLIN | POLLRDNORM); 183 + mask |= (EPOLLIN | EPOLLRDNORM); 184 184 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) 185 - mask |= (POLLOUT | POLLWRNORM); 185 + mask |= (EPOLLOUT | EPOLLWRNORM); 186 186 read_unlock_irqrestore(&rs->rs_recv_lock, flags); 187 187 188 188 /* clear state any time we wake a seen-congested socket */
+2 -2
net/rfkill/core.c
··· 1142 1142 static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait) 1143 1143 { 1144 1144 struct rfkill_data *data = file->private_data; 1145 - __poll_t res = POLLOUT | POLLWRNORM; 1145 + __poll_t res = EPOLLOUT | EPOLLWRNORM; 1146 1146 1147 1147 poll_wait(file, &data->read_wait, wait); 1148 1148 1149 1149 mutex_lock(&data->mtx); 1150 1150 if (!list_empty(&data->events)) 1151 - res = POLLIN | POLLRDNORM; 1151 + res = EPOLLIN | EPOLLRDNORM; 1152 1152 mutex_unlock(&data->mtx); 1153 1153 1154 1154 return res;
+2 -2
net/rxrpc/af_rxrpc.c
··· 742 742 /* the socket is readable if there are any messages waiting on the Rx 743 743 * queue */ 744 744 if (!list_empty(&rx->recvmsg_q)) 745 - mask |= POLLIN | POLLRDNORM; 745 + mask |= EPOLLIN | EPOLLRDNORM; 746 746 747 747 /* the socket is writable if there is space to add new data to the 748 748 * socket; there is no guarantee that any particular call in progress 749 749 * on the socket may have space in the Tx ACK window */ 750 750 if (rxrpc_writable(sk)) 751 - mask |= POLLOUT | POLLWRNORM; 751 + mask |= EPOLLOUT | EPOLLWRNORM; 752 752 753 753 return mask; 754 754 }
+10 -10
net/sctp/socket.c
··· 7602 7602 */ 7603 7603 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) 7604 7604 return (!list_empty(&sp->ep->asocs)) ? 7605 - (POLLIN | POLLRDNORM) : 0; 7605 + (EPOLLIN | EPOLLRDNORM) : 0; 7606 7606 7607 7607 mask = 0; 7608 7608 7609 7609 /* Is there any exceptional events? */ 7610 7610 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 7611 - mask |= POLLERR | 7612 - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 7611 + mask |= EPOLLERR | 7612 + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); 7613 7613 if (sk->sk_shutdown & RCV_SHUTDOWN) 7614 - mask |= POLLRDHUP | POLLIN | POLLRDNORM; 7614 + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 7615 7615 if (sk->sk_shutdown == SHUTDOWN_MASK) 7616 - mask |= POLLHUP; 7616 + mask |= EPOLLHUP; 7617 7617 7618 7618 /* Is it readable? Reconsider this code with TCP-style support. */ 7619 7619 if (!skb_queue_empty(&sk->sk_receive_queue)) 7620 - mask |= POLLIN | POLLRDNORM; 7620 + mask |= EPOLLIN | EPOLLRDNORM; 7621 7621 7622 7622 /* The association is either gone or not ready. */ 7623 7623 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) ··· 7625 7625 7626 7626 /* Is it writable? */ 7627 7627 if (sctp_writeable(sk)) { 7628 - mask |= POLLOUT | POLLWRNORM; 7628 + mask |= EPOLLOUT | EPOLLWRNORM; 7629 7629 } else { 7630 7630 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 7631 7631 /* ··· 7637 7637 * in the following code to cover it as well. 7638 7638 */ 7639 7639 if (sctp_writeable(sk)) 7640 - mask |= POLLOUT | POLLWRNORM; 7640 + mask |= EPOLLOUT | EPOLLWRNORM; 7641 7641 } 7642 7642 return mask; 7643 7643 } ··· 8161 8161 rcu_read_lock(); 8162 8162 wq = rcu_dereference(sk->sk_wq); 8163 8163 if (skwq_has_sleeper(wq)) 8164 - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 8165 - POLLRDNORM | POLLRDBAND); 8164 + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 8165 + EPOLLRDNORM | EPOLLRDBAND); 8166 8166 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 8167 8167 rcu_read_unlock(); 8168 8168 }
+12 -12
net/smc/af_smc.c
··· 1145 1145 1146 1146 spin_lock(&isk->accept_q_lock); 1147 1147 if (!list_empty(&isk->accept_q)) 1148 - mask = POLLIN | POLLRDNORM; 1148 + mask = EPOLLIN | EPOLLRDNORM; 1149 1149 spin_unlock(&isk->accept_q_lock); 1150 1150 1151 1151 return mask; ··· 1160 1160 int rc; 1161 1161 1162 1162 if (!sk) 1163 - return POLLNVAL; 1163 + return EPOLLNVAL; 1164 1164 1165 1165 smc = smc_sk(sock->sk); 1166 1166 sock_hold(sk); ··· 1171 1171 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); 1172 1172 /* if non-blocking connect finished ... */ 1173 1173 lock_sock(sk); 1174 - if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) { 1174 + if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) { 1175 1175 sk->sk_err = smc->clcsock->sk->sk_err; 1176 1176 if (sk->sk_err) { 1177 - mask |= POLLERR; 1177 + mask |= EPOLLERR; 1178 1178 } else { 1179 1179 rc = smc_connect_rdma(smc); 1180 1180 if (rc < 0) 1181 - mask |= POLLERR; 1181 + mask |= EPOLLERR; 1182 1182 /* success cases including fallback */ 1183 - mask |= POLLOUT | POLLWRNORM; 1183 + mask |= EPOLLOUT | EPOLLWRNORM; 1184 1184 } 1185 1185 } 1186 1186 } else { ··· 1190 1190 lock_sock(sk); 1191 1191 } 1192 1192 if (sk->sk_err) 1193 - mask |= POLLERR; 1193 + mask |= EPOLLERR; 1194 1194 if ((sk->sk_shutdown == SHUTDOWN_MASK) || 1195 1195 (sk->sk_state == SMC_CLOSED)) 1196 - mask |= POLLHUP; 1196 + mask |= EPOLLHUP; 1197 1197 if (sk->sk_state == SMC_LISTEN) { 1198 1198 /* woken up by sk_data_ready in smc_listen_work() */ 1199 1199 mask = smc_accept_poll(sk); 1200 1200 } else { 1201 1201 if (atomic_read(&smc->conn.sndbuf_space) || 1202 1202 sk->sk_shutdown & SEND_SHUTDOWN) { 1203 - mask |= POLLOUT | POLLWRNORM; 1203 + mask |= EPOLLOUT | EPOLLWRNORM; 1204 1204 } else { 1205 1205 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1206 1206 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1207 1207 } 1208 1208 if (atomic_read(&smc->conn.bytes_to_rcv)) 1209 - mask |= POLLIN | POLLRDNORM; 1209 + mask |= EPOLLIN | EPOLLRDNORM; 1210 1210 if (sk->sk_shutdown 
& RCV_SHUTDOWN) 1211 - mask |= POLLIN | POLLRDNORM | POLLRDHUP; 1211 + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 1212 1212 if (sk->sk_state == SMC_APPCLOSEWAIT1) 1213 - mask |= POLLIN; 1213 + mask |= EPOLLIN; 1214 1214 } 1215 1215 1216 1216 }
+2 -2
net/smc/smc_rx.c
··· 35 35 rcu_read_lock(); 36 36 wq = rcu_dereference(sk->sk_wq); 37 37 if (skwq_has_sleeper(wq)) 38 - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | 39 - POLLRDNORM | POLLRDBAND); 38 + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | 39 + EPOLLRDNORM | EPOLLRDBAND); 40 40 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 41 41 if ((sk->sk_shutdown == SHUTDOWN_MASK) || 42 42 (sk->sk_state == SMC_CLOSED))
+2 -2
net/smc/smc_tx.c
··· 46 46 wq = rcu_dereference(sk->sk_wq); 47 47 if (skwq_has_sleeper(wq)) 48 48 wake_up_interruptible_poll(&wq->wait, 49 - POLLOUT | POLLWRNORM | 50 - POLLWRBAND); 49 + EPOLLOUT | EPOLLWRNORM | 50 + EPOLLWRBAND); 51 51 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) 52 52 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); 53 53 rcu_read_unlock();
+2 -2
net/sunrpc/cache.c
··· 940 940 poll_wait(filp, &queue_wait, wait); 941 941 942 942 /* alway allow write */ 943 - mask = POLLOUT | POLLWRNORM; 943 + mask = EPOLLOUT | EPOLLWRNORM; 944 944 945 945 if (!rp) 946 946 return mask; ··· 950 950 for (cq= &rp->q; &cq->list != &cd->queue; 951 951 cq = list_entry(cq->list.next, struct cache_queue, list)) 952 952 if (!cq->reader) { 953 - mask |= POLLIN | POLLRDNORM; 953 + mask |= EPOLLIN | EPOLLRDNORM; 954 954 break; 955 955 } 956 956 spin_unlock(&queue_lock);
+3 -3
net/sunrpc/rpc_pipe.c
··· 345 345 { 346 346 struct inode *inode = file_inode(filp); 347 347 struct rpc_inode *rpci = RPC_I(inode); 348 - __poll_t mask = POLLOUT | POLLWRNORM; 348 + __poll_t mask = EPOLLOUT | EPOLLWRNORM; 349 349 350 350 poll_wait(filp, &rpci->waitq, wait); 351 351 352 352 inode_lock(inode); 353 353 if (rpci->pipe == NULL) 354 - mask |= POLLERR | POLLHUP; 354 + mask |= EPOLLERR | EPOLLHUP; 355 355 else if (filp->private_data || !list_empty(&rpci->pipe->pipe)) 356 - mask |= POLLIN | POLLRDNORM; 356 + mask |= EPOLLIN | EPOLLRDNORM; 357 357 inode_unlock(inode); 358 358 return mask; 359 359 }
+11 -11
net/tipc/socket.c
··· 721 721 sock_poll_wait(file, sk_sleep(sk), wait); 722 722 723 723 if (sk->sk_shutdown & RCV_SHUTDOWN) 724 - revents |= POLLRDHUP | POLLIN | POLLRDNORM; 724 + revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 725 725 if (sk->sk_shutdown == SHUTDOWN_MASK) 726 - revents |= POLLHUP; 726 + revents |= EPOLLHUP; 727 727 728 728 switch (sk->sk_state) { 729 729 case TIPC_ESTABLISHED: 730 730 case TIPC_CONNECTING: 731 731 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) 732 - revents |= POLLOUT; 732 + revents |= EPOLLOUT; 733 733 /* fall thru' */ 734 734 case TIPC_LISTEN: 735 735 if (!skb_queue_empty(&sk->sk_receive_queue)) 736 - revents |= POLLIN | POLLRDNORM; 736 + revents |= EPOLLIN | EPOLLRDNORM; 737 737 break; 738 738 case TIPC_OPEN: 739 739 if (tsk->group_is_open && !tsk->cong_link_cnt) 740 - revents |= POLLOUT; 740 + revents |= EPOLLOUT; 741 741 if (!tipc_sk_type_connectionless(sk)) 742 742 break; 743 743 if (skb_queue_empty(&sk->sk_receive_queue)) 744 744 break; 745 - revents |= POLLIN | POLLRDNORM; 745 + revents |= EPOLLIN | EPOLLRDNORM; 746 746 break; 747 747 case TIPC_DISCONNECTING: 748 - revents = POLLIN | POLLRDNORM | POLLHUP; 748 + revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP; 749 749 break; 750 750 } 751 751 return revents; ··· 1897 1897 rcu_read_lock(); 1898 1898 wq = rcu_dereference(sk->sk_wq); 1899 1899 if (skwq_has_sleeper(wq)) 1900 - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 1901 - POLLWRNORM | POLLWRBAND); 1900 + wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 1901 + EPOLLWRNORM | EPOLLWRBAND); 1902 1902 rcu_read_unlock(); 1903 1903 } 1904 1904 ··· 1914 1914 rcu_read_lock(); 1915 1915 wq = rcu_dereference(sk->sk_wq); 1916 1916 if (skwq_has_sleeper(wq)) 1917 - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 1918 - POLLRDNORM | POLLRDBAND); 1917 + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | 1918 + EPOLLRDNORM | EPOLLRDBAND); 1919 1919 rcu_read_unlock(); 1920 1920 } 1921 1921
+20 -20
net/unix/af_unix.c
··· 415 415 { 416 416 unix_dgram_peer_wake_disconnect(sk, other); 417 417 wake_up_interruptible_poll(sk_sleep(sk), 418 - POLLOUT | 419 - POLLWRNORM | 420 - POLLWRBAND); 418 + EPOLLOUT | 419 + EPOLLWRNORM | 420 + EPOLLWRBAND); 421 421 } 422 422 423 423 /* preconditions: ··· 454 454 wq = rcu_dereference(sk->sk_wq); 455 455 if (skwq_has_sleeper(wq)) 456 456 wake_up_interruptible_sync_poll(&wq->wait, 457 - POLLOUT | POLLWRNORM | POLLWRBAND); 457 + EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); 458 458 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 459 459 } 460 460 rcu_read_unlock(); ··· 2129 2129 2130 2130 if (wq_has_sleeper(&u->peer_wait)) 2131 2131 wake_up_interruptible_sync_poll(&u->peer_wait, 2132 - POLLOUT | POLLWRNORM | 2133 - POLLWRBAND); 2132 + EPOLLOUT | EPOLLWRNORM | 2133 + EPOLLWRBAND); 2134 2134 2135 2135 if (msg->msg_name) 2136 2136 unix_copy_addr(msg, skb->sk); ··· 2650 2650 2651 2651 /* exceptional events? */ 2652 2652 if (sk->sk_err) 2653 - mask |= POLLERR; 2653 + mask |= EPOLLERR; 2654 2654 if (sk->sk_shutdown == SHUTDOWN_MASK) 2655 - mask |= POLLHUP; 2655 + mask |= EPOLLHUP; 2656 2656 if (sk->sk_shutdown & RCV_SHUTDOWN) 2657 - mask |= POLLRDHUP | POLLIN | POLLRDNORM; 2657 + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 2658 2658 2659 2659 /* readable? */ 2660 2660 if (!skb_queue_empty(&sk->sk_receive_queue)) 2661 - mask |= POLLIN | POLLRDNORM; 2661 + mask |= EPOLLIN | EPOLLRDNORM; 2662 2662 2663 2663 /* Connection-based need to check for termination and startup */ 2664 2664 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && 2665 2665 sk->sk_state == TCP_CLOSE) 2666 - mask |= POLLHUP; 2666 + mask |= EPOLLHUP; 2667 2667 2668 2668 /* 2669 2669 * we set writable also when the other side has shut down the 2670 2670 * connection. This prevents stuck sockets. 
2671 2671 */ 2672 2672 if (unix_writable(sk)) 2673 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 2673 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 2674 2674 2675 2675 return mask; 2676 2676 } ··· 2687 2687 2688 2688 /* exceptional events? */ 2689 2689 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 2690 - mask |= POLLERR | 2691 - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); 2690 + mask |= EPOLLERR | 2691 + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); 2692 2692 2693 2693 if (sk->sk_shutdown & RCV_SHUTDOWN) 2694 - mask |= POLLRDHUP | POLLIN | POLLRDNORM; 2694 + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; 2695 2695 if (sk->sk_shutdown == SHUTDOWN_MASK) 2696 - mask |= POLLHUP; 2696 + mask |= EPOLLHUP; 2697 2697 2698 2698 /* readable? */ 2699 2699 if (!skb_queue_empty(&sk->sk_receive_queue)) 2700 - mask |= POLLIN | POLLRDNORM; 2700 + mask |= EPOLLIN | EPOLLRDNORM; 2701 2701 2702 2702 /* Connection-based need to check for termination and startup */ 2703 2703 if (sk->sk_type == SOCK_SEQPACKET) { 2704 2704 if (sk->sk_state == TCP_CLOSE) 2705 - mask |= POLLHUP; 2705 + mask |= EPOLLHUP; 2706 2706 /* connection hasn't started yet? */ 2707 2707 if (sk->sk_state == TCP_SYN_SENT) 2708 2708 return mask; 2709 2709 } 2710 2710 2711 2711 /* No write status requested, avoid expensive OUT tests. */ 2712 - if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT))) 2712 + if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) 2713 2713 return mask; 2714 2714 2715 2715 writable = unix_writable(sk); ··· 2726 2726 } 2727 2727 2728 2728 if (writable) 2729 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 2729 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 2730 2730 else 2731 2731 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2732 2732
+15 -15
net/vmw_vsock/af_vsock.c
··· 865 865 866 866 if (sk->sk_err) 867 867 /* Signify that there has been an error on this socket. */ 868 - mask |= POLLERR; 868 + mask |= EPOLLERR; 869 869 870 870 /* INET sockets treat local write shutdown and peer write shutdown as a 871 - * case of POLLHUP set. 871 + * case of EPOLLHUP set. 872 872 */ 873 873 if ((sk->sk_shutdown == SHUTDOWN_MASK) || 874 874 ((sk->sk_shutdown & SEND_SHUTDOWN) && 875 875 (vsk->peer_shutdown & SEND_SHUTDOWN))) { 876 - mask |= POLLHUP; 876 + mask |= EPOLLHUP; 877 877 } 878 878 879 879 if (sk->sk_shutdown & RCV_SHUTDOWN || 880 880 vsk->peer_shutdown & SEND_SHUTDOWN) { 881 - mask |= POLLRDHUP; 881 + mask |= EPOLLRDHUP; 882 882 } 883 883 884 884 if (sock->type == SOCK_DGRAM) { ··· 888 888 */ 889 889 if (!skb_queue_empty(&sk->sk_receive_queue) || 890 890 (sk->sk_shutdown & RCV_SHUTDOWN)) { 891 - mask |= POLLIN | POLLRDNORM; 891 + mask |= EPOLLIN | EPOLLRDNORM; 892 892 } 893 893 894 894 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 895 - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 895 + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 896 896 897 897 } else if (sock->type == SOCK_STREAM) { 898 898 lock_sock(sk); ··· 902 902 */ 903 903 if (sk->sk_state == TCP_LISTEN 904 904 && !vsock_is_accept_queue_empty(sk)) 905 - mask |= POLLIN | POLLRDNORM; 905 + mask |= EPOLLIN | EPOLLRDNORM; 906 906 907 907 /* If there is something in the queue then we can read. 
*/ 908 908 if (transport->stream_is_active(vsk) && ··· 911 911 int ret = transport->notify_poll_in( 912 912 vsk, 1, &data_ready_now); 913 913 if (ret < 0) { 914 - mask |= POLLERR; 914 + mask |= EPOLLERR; 915 915 } else { 916 916 if (data_ready_now) 917 - mask |= POLLIN | POLLRDNORM; 917 + mask |= EPOLLIN | EPOLLRDNORM; 918 918 919 919 } 920 920 } ··· 925 925 */ 926 926 if (sk->sk_shutdown & RCV_SHUTDOWN || 927 927 vsk->peer_shutdown & SEND_SHUTDOWN) { 928 - mask |= POLLIN | POLLRDNORM; 928 + mask |= EPOLLIN | EPOLLRDNORM; 929 929 } 930 930 931 931 /* Connected sockets that can produce data can be written. */ ··· 935 935 int ret = transport->notify_poll_out( 936 936 vsk, 1, &space_avail_now); 937 937 if (ret < 0) { 938 - mask |= POLLERR; 938 + mask |= EPOLLERR; 939 939 } else { 940 940 if (space_avail_now) 941 - /* Remove POLLWRBAND since INET 941 + /* Remove EPOLLWRBAND since INET 942 942 * sockets are not setting it. 943 943 */ 944 - mask |= POLLOUT | POLLWRNORM; 944 + mask |= EPOLLOUT | EPOLLWRNORM; 945 945 946 946 } 947 947 } 948 948 } 949 949 950 950 /* Simulate INET socket poll behaviors, which sets 951 - * POLLOUT|POLLWRNORM when peer is closed and nothing to read, 951 + * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read, 952 952 * but local send is not shutdown. 953 953 */ 954 954 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) { 955 955 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 956 - mask |= POLLOUT | POLLWRNORM; 956 + mask |= EPOLLOUT | EPOLLWRNORM; 957 957 958 958 } 959 959
+1 -1
security/apparmor/apparmorfs.c
··· 580 580 mutex_lock_nested(&rev->ns->lock, rev->ns->level); 581 581 poll_wait(file, &rev->ns->wait, pt); 582 582 if (rev->last_read < rev->ns->revision) 583 - mask |= POLLIN | POLLRDNORM; 583 + mask |= EPOLLIN | EPOLLRDNORM; 584 584 mutex_unlock(&rev->ns->lock); 585 585 } 586 586
+3 -3
security/tomoyo/audit.c
··· 456 456 * @file: Pointer to "struct file". 457 457 * @wait: Pointer to "poll_table". Maybe NULL. 458 458 * 459 - * Returns POLLIN | POLLRDNORM when ready to read an audit log. 459 + * Returns EPOLLIN | EPOLLRDNORM when ready to read an audit log. 460 460 */ 461 461 __poll_t tomoyo_poll_log(struct file *file, poll_table *wait) 462 462 { 463 463 if (tomoyo_log_count) 464 - return POLLIN | POLLRDNORM; 464 + return EPOLLIN | EPOLLRDNORM; 465 465 poll_wait(file, &tomoyo_log_wait, wait); 466 466 if (tomoyo_log_count) 467 - return POLLIN | POLLRDNORM; 467 + return EPOLLIN | EPOLLRDNORM; 468 468 return 0; 469 469 }
+7 -7
security/tomoyo/common.c
··· 2116 2116 * @file: Pointer to "struct file". 2117 2117 * @wait: Pointer to "poll_table". 2118 2118 * 2119 - * Returns POLLIN | POLLRDNORM when ready to read, 0 otherwise. 2119 + * Returns EPOLLIN | EPOLLRDNORM when ready to read, 0 otherwise. 2120 2120 * 2121 2121 * Waits for access requests which violated policy in enforcing mode. 2122 2122 */ 2123 2123 static __poll_t tomoyo_poll_query(struct file *file, poll_table *wait) 2124 2124 { 2125 2125 if (!list_empty(&tomoyo_query_list)) 2126 - return POLLIN | POLLRDNORM; 2126 + return EPOLLIN | EPOLLRDNORM; 2127 2127 poll_wait(file, &tomoyo_query_wait, wait); 2128 2128 if (!list_empty(&tomoyo_query_list)) 2129 - return POLLIN | POLLRDNORM; 2129 + return EPOLLIN | EPOLLRDNORM; 2130 2130 return 0; 2131 2131 } 2132 2132 ··· 2450 2450 * @file: Pointer to "struct file". 2451 2451 * @wait: Pointer to "poll_table". Maybe NULL. 2452 2452 * 2453 - * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write, 2454 - * POLLOUT | POLLWRNORM otherwise. 2453 + * Returns EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM if ready to read/write, 2454 + * EPOLLOUT | EPOLLWRNORM otherwise. 2455 2455 */ 2456 2456 __poll_t tomoyo_poll_control(struct file *file, poll_table *wait) 2457 2457 { 2458 2458 struct tomoyo_io_buffer *head = file->private_data; 2459 2459 if (head->poll) 2460 - return head->poll(file, wait) | POLLOUT | POLLWRNORM; 2461 - return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM; 2460 + return head->poll(file, wait) | EPOLLOUT | EPOLLWRNORM; 2461 + return EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM; 2462 2462 } 2463 2463 2464 2464 /**
+2 -2
security/tomoyo/securityfs_if.c
··· 154 154 * @file: Pointer to "struct file". 155 155 * @wait: Pointer to "poll_table". Maybe NULL. 156 156 * 157 - * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write, 158 - * POLLOUT | POLLWRNORM otherwise. 157 + * Returns EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM if ready to read/write, 158 + * EPOLLOUT | EPOLLWRNORM otherwise. 159 159 */ 160 160 static __poll_t tomoyo_poll(struct file *file, poll_table *wait) 161 161 {
+5 -5
sound/core/compress_offload.c
··· 399 399 static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream) 400 400 { 401 401 if (stream->direction == SND_COMPRESS_PLAYBACK) 402 - return POLLOUT | POLLWRNORM; 402 + return EPOLLOUT | EPOLLWRNORM; 403 403 else 404 - return POLLIN | POLLRDNORM; 404 + return EPOLLIN | EPOLLRDNORM; 405 405 } 406 406 407 407 static __poll_t snd_compr_poll(struct file *f, poll_table *wait) ··· 412 412 __poll_t retval = 0; 413 413 414 414 if (snd_BUG_ON(!data)) 415 - return POLLERR; 415 + return EPOLLERR; 416 416 417 417 stream = &data->stream; 418 418 ··· 421 421 switch (stream->runtime->state) { 422 422 case SNDRV_PCM_STATE_OPEN: 423 423 case SNDRV_PCM_STATE_XRUN: 424 - retval = snd_compr_get_poll(stream) | POLLERR; 424 + retval = snd_compr_get_poll(stream) | EPOLLERR; 425 425 goto out; 426 426 default: 427 427 break; ··· 447 447 retval = snd_compr_get_poll(stream); 448 448 break; 449 449 default: 450 - retval = snd_compr_get_poll(stream) | POLLERR; 450 + retval = snd_compr_get_poll(stream) | EPOLLERR; 451 451 break; 452 452 } 453 453 out:
+1 -1
sound/core/control.c
··· 1679 1679 1680 1680 mask = 0; 1681 1681 if (!list_empty(&ctl->events)) 1682 - mask |= POLLIN | POLLRDNORM; 1682 + mask |= EPOLLIN | EPOLLRDNORM; 1683 1683 1684 1684 return mask; 1685 1685 }
+2 -2
sound/core/info.c
··· 214 214 data->file_private_data, 215 215 file, wait); 216 216 if (entry->c.ops->read) 217 - mask |= POLLIN | POLLRDNORM; 217 + mask |= EPOLLIN | EPOLLRDNORM; 218 218 if (entry->c.ops->write) 219 - mask |= POLLOUT | POLLWRNORM; 219 + mask |= EPOLLOUT | EPOLLWRNORM; 220 220 return mask; 221 221 } 222 222
+1 -1
sound/core/init.c
··· 346 346 347 347 static __poll_t snd_disconnect_poll(struct file * file, poll_table * wait) 348 348 { 349 - return POLLERR | POLLNVAL; 349 + return EPOLLERR | EPOLLNVAL; 350 350 } 351 351 352 352 static long snd_disconnect_ioctl(struct file *file,
+2 -2
sound/core/oss/pcm_oss.c
··· 2705 2705 if (runtime->status->state != SNDRV_PCM_STATE_DRAINING && 2706 2706 (runtime->status->state != SNDRV_PCM_STATE_RUNNING || 2707 2707 snd_pcm_oss_playback_ready(psubstream))) 2708 - mask |= POLLOUT | POLLWRNORM; 2708 + mask |= EPOLLOUT | EPOLLWRNORM; 2709 2709 snd_pcm_stream_unlock_irq(psubstream); 2710 2710 } 2711 2711 if (csubstream != NULL) { ··· 2715 2715 snd_pcm_stream_lock_irq(csubstream); 2716 2716 if ((ostate = runtime->status->state) != SNDRV_PCM_STATE_RUNNING || 2717 2717 snd_pcm_oss_capture_ready(csubstream)) 2718 - mask |= POLLIN | POLLRDNORM; 2718 + mask |= EPOLLIN | EPOLLRDNORM; 2719 2719 snd_pcm_stream_unlock_irq(csubstream); 2720 2720 if (ostate != SNDRV_PCM_STATE_RUNNING && runtime->oss.trigger) { 2721 2721 struct snd_pcm_oss_file ofile;
+7 -7
sound/core/pcm_native.c
··· 3147 3147 3148 3148 substream = pcm_file->substream; 3149 3149 if (PCM_RUNTIME_CHECK(substream)) 3150 - return POLLOUT | POLLWRNORM | POLLERR; 3150 + return EPOLLOUT | EPOLLWRNORM | EPOLLERR; 3151 3151 runtime = substream->runtime; 3152 3152 3153 3153 poll_wait(file, &runtime->sleep, wait); ··· 3159 3159 case SNDRV_PCM_STATE_PREPARED: 3160 3160 case SNDRV_PCM_STATE_PAUSED: 3161 3161 if (avail >= runtime->control->avail_min) { 3162 - mask = POLLOUT | POLLWRNORM; 3162 + mask = EPOLLOUT | EPOLLWRNORM; 3163 3163 break; 3164 3164 } 3165 3165 /* Fall through */ ··· 3167 3167 mask = 0; 3168 3168 break; 3169 3169 default: 3170 - mask = POLLOUT | POLLWRNORM | POLLERR; 3170 + mask = EPOLLOUT | EPOLLWRNORM | EPOLLERR; 3171 3171 break; 3172 3172 } 3173 3173 snd_pcm_stream_unlock_irq(substream); ··· 3186 3186 3187 3187 substream = pcm_file->substream; 3188 3188 if (PCM_RUNTIME_CHECK(substream)) 3189 - return POLLIN | POLLRDNORM | POLLERR; 3189 + return EPOLLIN | EPOLLRDNORM | EPOLLERR; 3190 3190 runtime = substream->runtime; 3191 3191 3192 3192 poll_wait(file, &runtime->sleep, wait); ··· 3198 3198 case SNDRV_PCM_STATE_PREPARED: 3199 3199 case SNDRV_PCM_STATE_PAUSED: 3200 3200 if (avail >= runtime->control->avail_min) { 3201 - mask = POLLIN | POLLRDNORM; 3201 + mask = EPOLLIN | EPOLLRDNORM; 3202 3202 break; 3203 3203 } 3204 3204 mask = 0; 3205 3205 break; 3206 3206 case SNDRV_PCM_STATE_DRAINING: 3207 3207 if (avail > 0) { 3208 - mask = POLLIN | POLLRDNORM; 3208 + mask = EPOLLIN | EPOLLRDNORM; 3209 3209 break; 3210 3210 } 3211 3211 /* Fall through */ 3212 3212 default: 3213 - mask = POLLIN | POLLRDNORM | POLLERR; 3213 + mask = EPOLLIN | EPOLLRDNORM | EPOLLERR; 3214 3214 break; 3215 3215 } 3216 3216 snd_pcm_stream_unlock_irq(substream);
+2 -2
sound/core/rawmidi.c
··· 1385 1385 mask = 0; 1386 1386 if (rfile->input != NULL) { 1387 1387 if (snd_rawmidi_ready(rfile->input)) 1388 - mask |= POLLIN | POLLRDNORM; 1388 + mask |= EPOLLIN | EPOLLRDNORM; 1389 1389 } 1390 1390 if (rfile->output != NULL) { 1391 1391 if (snd_rawmidi_ready(rfile->output)) 1392 - mask |= POLLOUT | POLLWRNORM; 1392 + mask |= EPOLLOUT | EPOLLWRNORM; 1393 1393 } 1394 1394 return mask; 1395 1395 }
+2 -2
sound/core/seq/oss/seq_oss_rw.c
··· 204 204 /* input */ 205 205 if (dp->readq && is_read_mode(dp->file_mode)) { 206 206 if (snd_seq_oss_readq_poll(dp->readq, file, wait)) 207 - mask |= POLLIN | POLLRDNORM; 207 + mask |= EPOLLIN | EPOLLRDNORM; 208 208 } 209 209 210 210 /* output */ 211 211 if (dp->writeq && is_write_mode(dp->file_mode)) { 212 212 if (snd_seq_kernel_client_write_poll(dp->cseq, file, wait)) 213 - mask |= POLLOUT | POLLWRNORM; 213 + mask |= EPOLLOUT | EPOLLWRNORM; 214 214 } 215 215 return mask; 216 216 }
+2 -2
sound/core/seq/seq_clientmgr.c
··· 1101 1101 1102 1102 /* check if data is available in the outqueue */ 1103 1103 if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait)) 1104 - mask |= POLLIN | POLLRDNORM; 1104 + mask |= EPOLLIN | EPOLLRDNORM; 1105 1105 } 1106 1106 1107 1107 if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) { ··· 1109 1109 /* check if data is available in the pool */ 1110 1110 if (!snd_seq_write_pool_allocated(client) || 1111 1111 snd_seq_pool_poll_wait(client->pool, file, wait)) 1112 - mask |= POLLOUT | POLLWRNORM; 1112 + mask |= EPOLLOUT | EPOLLWRNORM; 1113 1113 } 1114 1114 1115 1115 return mask;
+2 -2
sound/core/timer.c
··· 2084 2084 mask = 0; 2085 2085 spin_lock_irq(&tu->qlock); 2086 2086 if (tu->qused) 2087 - mask |= POLLIN | POLLRDNORM; 2087 + mask |= EPOLLIN | EPOLLRDNORM; 2088 2088 if (tu->disconnected) 2089 - mask |= POLLERR; 2089 + mask |= EPOLLERR; 2090 2090 spin_unlock_irq(&tu->qlock); 2091 2091 2092 2092 return mask;
+1 -1
sound/firewire/bebob/bebob_hwdep.c
··· 63 63 64 64 spin_lock_irq(&bebob->lock); 65 65 if (bebob->dev_lock_changed) 66 - events = POLLIN | POLLRDNORM; 66 + events = EPOLLIN | EPOLLRDNORM; 67 67 else 68 68 events = 0; 69 69 spin_unlock_irq(&bebob->lock);
+1 -1
sound/firewire/dice/dice-hwdep.c
··· 62 62 63 63 spin_lock_irq(&dice->lock); 64 64 if (dice->dev_lock_changed || dice->notification_bits != 0) 65 - events = POLLIN | POLLRDNORM; 65 + events = EPOLLIN | EPOLLRDNORM; 66 66 else 67 67 events = 0; 68 68 spin_unlock_irq(&dice->lock);
+1 -1
sound/firewire/digi00x/digi00x-hwdep.c
··· 70 70 71 71 spin_lock_irq(&dg00x->lock); 72 72 if (dg00x->dev_lock_changed || dg00x->msg) 73 - events = POLLIN | POLLRDNORM; 73 + events = EPOLLIN | EPOLLRDNORM; 74 74 else 75 75 events = 0; 76 76 spin_unlock_irq(&dg00x->lock);
+1 -1
sound/firewire/fireface/ff-hwdep.c
··· 62 62 63 63 spin_lock_irq(&ff->lock); 64 64 if (ff->dev_lock_changed) 65 - events = POLLIN | POLLRDNORM; 65 + events = EPOLLIN | EPOLLRDNORM; 66 66 else 67 67 events = 0; 68 68 spin_unlock_irq(&ff->lock);
+2 -2
sound/firewire/fireworks/fireworks_hwdep.c
··· 194 194 195 195 spin_lock_irq(&efw->lock); 196 196 if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr) 197 - events = POLLIN | POLLRDNORM; 197 + events = EPOLLIN | EPOLLRDNORM; 198 198 else 199 199 events = 0; 200 200 spin_unlock_irq(&efw->lock); 201 201 202 - return events | POLLOUT; 202 + return events | EPOLLOUT; 203 203 } 204 204 205 205 static int
+2 -2
sound/firewire/motu/motu-hwdep.c
··· 69 69 70 70 spin_lock_irq(&motu->lock); 71 71 if (motu->dev_lock_changed || motu->msg) 72 - events = POLLIN | POLLRDNORM; 72 + events = EPOLLIN | EPOLLRDNORM; 73 73 else 74 74 events = 0; 75 75 spin_unlock_irq(&motu->lock); 76 76 77 - return events | POLLOUT; 77 + return events | EPOLLOUT; 78 78 } 79 79 80 80 static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+1 -1
sound/firewire/oxfw/oxfw-hwdep.c
··· 62 62 63 63 spin_lock_irq(&oxfw->lock); 64 64 if (oxfw->dev_lock_changed) 65 - events = POLLIN | POLLRDNORM; 65 + events = EPOLLIN | EPOLLRDNORM; 66 66 else 67 67 events = 0; 68 68 spin_unlock_irq(&oxfw->lock);
+1 -1
sound/firewire/tascam/tascam-hwdep.c
··· 60 60 61 61 spin_lock_irq(&tscm->lock); 62 62 if (tscm->dev_lock_changed) 63 - events = POLLIN | POLLRDNORM; 63 + events = EPOLLIN | EPOLLRDNORM; 64 64 else 65 65 events = 0; 66 66 spin_unlock_irq(&tscm->lock);
+1 -1
sound/oss/dmasound/dmasound_core.c
··· 684 684 poll_wait(file, &write_sq.action_queue, wait); 685 685 if (file->f_mode & FMODE_WRITE) 686 686 if (write_sq.count < write_sq.max_active || write_sq.block_size - write_sq.rear_size > 0) 687 - mask |= POLLOUT | POLLWRNORM; 687 + mask |= EPOLLOUT | EPOLLWRNORM; 688 688 return mask; 689 689 690 690 }
+1 -1
sound/usb/mixer_quirks.c
··· 246 246 struct usb_mixer_interface *mixer = hw->private_data; 247 247 248 248 poll_wait(file, &mixer->rc_waitq, wait); 249 - return mixer->rc_code ? POLLIN | POLLRDNORM : 0; 249 + return mixer->rc_code ? EPOLLIN | EPOLLRDNORM : 0; 250 250 } 251 251 252 252 static int snd_usb_soundblaster_remote_init(struct usb_mixer_interface *mixer)
+2 -2
sound/usb/usx2y/us122l.c
··· 280 280 281 281 poll_wait(file, &us122l->sk.sleep, wait); 282 282 283 - mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR; 283 + mask = EPOLLIN | EPOLLOUT | EPOLLWRNORM | EPOLLERR; 284 284 if (mutex_trylock(&us122l->mutex)) { 285 285 struct usb_stream *s = us122l->sk.s; 286 286 if (s && s->state == usb_stream_ready) { ··· 290 290 polled = &us122l->second_periods_polled; 291 291 if (*polled != s->periods_done) { 292 292 *polled = s->periods_done; 293 - mask = POLLIN | POLLOUT | POLLWRNORM; 293 + mask = EPOLLIN | EPOLLOUT | EPOLLWRNORM; 294 294 } else 295 295 mask = 0; 296 296 }
+2 -2
sound/usb/usx2y/usX2Yhwdep.c
··· 92 92 struct usX2Ydev *us428 = hw->private_data; 93 93 struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem; 94 94 if (us428->chip_status & USX2Y_STAT_CHIP_HUP) 95 - return POLLHUP; 95 + return EPOLLHUP; 96 96 97 97 poll_wait(file, &us428->us428ctls_wait_queue_head, wait); 98 98 99 99 if (shm != NULL && shm->CtlSnapShotLast != shm->CtlSnapShotRed) 100 - mask |= POLLIN; 100 + mask |= EPOLLIN; 101 101 102 102 return mask; 103 103 }
+4 -4
virt/kvm/eventfd.c
··· 194 194 unsigned seq; 195 195 int idx; 196 196 197 - if (flags & POLLIN) { 197 + if (flags & EPOLLIN) { 198 198 idx = srcu_read_lock(&kvm->irq_srcu); 199 199 do { 200 200 seq = read_seqcount_begin(&irqfd->irq_entry_sc); ··· 208 208 srcu_read_unlock(&kvm->irq_srcu, idx); 209 209 } 210 210 211 - if (flags & POLLHUP) { 211 + if (flags & EPOLLHUP) { 212 212 /* The eventfd is closing, detach from KVM */ 213 213 unsigned long flags; 214 214 ··· 399 399 */ 400 400 events = f.file->f_op->poll(f.file, &irqfd->pt); 401 401 402 - if (events & POLLIN) 402 + if (events & EPOLLIN) 403 403 schedule_work(&irqfd->inject); 404 404 405 405 /* 406 406 * do not drop the file until the irqfd is fully initialized, otherwise 407 - * we might race against the POLLHUP 407 + * we might race against the EPOLLHUP 408 408 */ 409 409 fdput(f); 410 410 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS