Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'bitmap-5.17-rc1' of git://github.com/norov/linux

Pull bitmap updates from Yury Norov:

- introduce for_each_set_bitrange()

- use find_first_*_bit() instead of find_next_*_bit() where possible

- unify for_each_bit() macros

* tag 'bitmap-5.17-rc1' of git://github.com/norov/linux:
vsprintf: rework bitmap_list_string
lib: bitmap: add performance test for bitmap_print_to_pagebuf
bitmap: unify find_bit operations
mm/percpu: micro-optimize pcpu_is_populated()
Replace for_each_*_bit_from() with for_each_*_bit() where appropriate
find: micro-optimize for_each_{set,clear}_bit()
include/linux: move for_each_bit() macros from bitops.h to find.h
cpumask: replace cpumask_next_* with cpumask_first_* where appropriate
tools: sync tools/bitmap with mother linux
all: replace find_next{,_zero}_bit with find_first{,_zero}_bit where appropriate
cpumask: use find_first_and_bit()
lib: add find_first_and_bit()
arch: remove GENERIC_FIND_FIRST_BIT entirely
include: move find.h from asm_generic to linux
bitops: move find_bit_*_le functions from le.h to find.h
bitops: protect find_first_{,zero}_bit properly

+635 -438
+2 -2
MAINTAINERS
··· 3410 3410 R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 3411 3411 R: Rasmus Villemoes <linux@rasmusvillemoes.dk> 3412 3412 S: Maintained 3413 - F: include/asm-generic/bitops/find.h 3414 3413 F: include/linux/bitmap.h 3414 + F: include/linux/find.h 3415 3415 F: lib/bitmap.c 3416 3416 F: lib/find_bit.c 3417 3417 F: lib/find_bit_benchmark.c 3418 3418 F: lib/test_bitmap.c 3419 - F: tools/include/asm-generic/bitops/find.h 3420 3419 F: tools/include/linux/bitmap.h 3420 + F: tools/include/linux/find.h 3421 3421 F: tools/lib/bitmap.c 3422 3422 F: tools/lib/find_bit.c 3423 3423
-2
arch/alpha/include/asm/bitops.h
··· 430 430 431 431 #endif /* __KERNEL__ */ 432 432 433 - #include <asm-generic/bitops/find.h> 434 - 435 433 #ifdef __KERNEL__ 436 434 437 435 /*
-1
arch/arc/Kconfig
··· 20 20 select COMMON_CLK 21 21 select DMA_DIRECT_REMAP 22 22 select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) 23 - select GENERIC_FIND_FIRST_BIT 24 23 # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP 25 24 select GENERIC_IRQ_SHOW 26 25 select GENERIC_PCI_IOMAP
-1
arch/arc/include/asm/bitops.h
··· 189 189 #include <asm-generic/bitops/atomic.h> 190 190 #include <asm-generic/bitops/non-atomic.h> 191 191 192 - #include <asm-generic/bitops/find.h> 193 192 #include <asm-generic/bitops/le.h> 194 193 #include <asm-generic/bitops/ext2-atomic-setbit.h> 195 194
-1
arch/arm/include/asm/bitops.h
··· 264 264 265 265 #endif 266 266 267 - #include <asm-generic/bitops/find.h> 268 267 #include <asm-generic/bitops/le.h> 269 268 270 269 /*
-1
arch/arm64/Kconfig
··· 120 120 select GENERIC_CPU_AUTOPROBE 121 121 select GENERIC_CPU_VULNERABILITIES 122 122 select GENERIC_EARLY_IOREMAP 123 - select GENERIC_FIND_FIRST_BIT 124 123 select GENERIC_IDLE_POLL_SETUP 125 124 select GENERIC_IRQ_IPI 126 125 select GENERIC_IRQ_PROBE
-1
arch/arm64/include/asm/bitops.h
··· 18 18 19 19 #include <asm-generic/bitops/ffz.h> 20 20 #include <asm-generic/bitops/fls64.h> 21 - #include <asm-generic/bitops/find.h> 22 21 23 22 #include <asm-generic/bitops/sched.h> 24 23 #include <asm-generic/bitops/hweight.h>
-1
arch/csky/include/asm/bitops.h
··· 59 59 60 60 #include <asm-generic/bitops/ffz.h> 61 61 #include <asm-generic/bitops/fls64.h> 62 - #include <asm-generic/bitops/find.h> 63 62 64 63 #ifndef _LINUX_BITOPS_H 65 64 #error only <linux/bitops.h> can be included directly
-1
arch/h8300/include/asm/bitops.h
··· 168 168 return result; 169 169 } 170 170 171 - #include <asm-generic/bitops/find.h> 172 171 #include <asm-generic/bitops/sched.h> 173 172 #include <asm-generic/bitops/hweight.h> 174 173 #include <asm-generic/bitops/lock.h>
-1
arch/hexagon/include/asm/bitops.h
··· 271 271 } 272 272 273 273 #include <asm-generic/bitops/lock.h> 274 - #include <asm-generic/bitops/find.h> 275 274 276 275 #include <asm-generic/bitops/fls64.h> 277 276 #include <asm-generic/bitops/sched.h>
-2
arch/ia64/include/asm/bitops.h
··· 441 441 442 442 #endif /* __KERNEL__ */ 443 443 444 - #include <asm-generic/bitops/find.h> 445 - 446 444 #ifdef __KERNEL__ 447 445 448 446 #include <asm-generic/bitops/le.h>
-2
arch/m68k/include/asm/bitops.h
··· 529 529 #include <asm-generic/bitops/le.h> 530 530 #endif /* __KERNEL__ */ 531 531 532 - #include <asm-generic/bitops/find.h> 533 - 534 532 #endif /* _M68K_BITOPS_H */
-1
arch/mips/Kconfig
··· 32 32 select GENERIC_ATOMIC64 if !64BIT 33 33 select GENERIC_CMOS_UPDATE 34 34 select GENERIC_CPU_AUTOPROBE 35 - select GENERIC_FIND_FIRST_BIT 36 35 select GENERIC_GETTIMEOFDAY 37 36 select GENERIC_IOMAP 38 37 select GENERIC_IRQ_PROBE
-1
arch/mips/include/asm/bitops.h
··· 444 444 } 445 445 446 446 #include <asm-generic/bitops/ffz.h> 447 - #include <asm-generic/bitops/find.h> 448 447 449 448 #ifdef __KERNEL__ 450 449
-1
arch/openrisc/include/asm/bitops.h
··· 30 30 #include <asm/bitops/fls.h> 31 31 #include <asm/bitops/__fls.h> 32 32 #include <asm-generic/bitops/fls64.h> 33 - #include <asm-generic/bitops/find.h> 34 33 35 34 #ifndef _LINUX_BITOPS_H 36 35 #error only <linux/bitops.h> can be included directly
-1
arch/parisc/include/asm/bitops.h
··· 203 203 #include <asm-generic/bitops/hweight.h> 204 204 #include <asm-generic/bitops/lock.h> 205 205 #include <asm-generic/bitops/sched.h> 206 - #include <asm-generic/bitops/find.h> 207 206 #include <asm-generic/bitops/le.h> 208 207 #include <asm-generic/bitops/ext2-atomic-setbit.h> 209 208
-2
arch/powerpc/include/asm/bitops.h
··· 328 328 #include <asm-generic/bitops/hweight.h> 329 329 #endif 330 330 331 - #include <asm-generic/bitops/find.h> 332 - 333 331 /* wrappers that deal with KASAN instrumentation */ 334 332 #include <asm-generic/bitops/instrumented-atomic.h> 335 333 #include <asm-generic/bitops/instrumented-lock.h>
+2 -2
arch/powerpc/platforms/pasemi/dma_lib.c
··· 375 375 int bit; 376 376 377 377 retry: 378 - bit = find_next_bit(flags_free, MAX_FLAGS, 0); 378 + bit = find_first_bit(flags_free, MAX_FLAGS); 379 379 if (bit >= MAX_FLAGS) 380 380 return -ENOSPC; 381 381 if (!test_and_clear_bit(bit, flags_free)) ··· 440 440 int bit; 441 441 442 442 retry: 443 - bit = find_next_bit(fun_free, MAX_FLAGS, 0); 443 + bit = find_first_bit(fun_free, MAX_FLAGS); 444 444 if (bit >= MAX_FLAGS) 445 445 return -ENOSPC; 446 446 if (!test_and_clear_bit(bit, fun_free))
-1
arch/riscv/include/asm/bitops.h
··· 20 20 #include <asm-generic/bitops/fls.h> 21 21 #include <asm-generic/bitops/__fls.h> 22 22 #include <asm-generic/bitops/fls64.h> 23 - #include <asm-generic/bitops/find.h> 24 23 #include <asm-generic/bitops/sched.h> 25 24 #include <asm-generic/bitops/ffs.h> 26 25
-1
arch/s390/Kconfig
··· 127 127 select GENERIC_CPU_AUTOPROBE 128 128 select GENERIC_CPU_VULNERABILITIES 129 129 select GENERIC_ENTRY 130 - select GENERIC_FIND_FIRST_BIT 131 130 select GENERIC_GETTIMEOFDAY 132 131 select GENERIC_PTDUMP 133 132 select GENERIC_SMP_IDLE_THREAD
-1
arch/s390/include/asm/bitops.h
··· 387 387 #endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */ 388 388 389 389 #include <asm-generic/bitops/ffz.h> 390 - #include <asm-generic/bitops/find.h> 391 390 #include <asm-generic/bitops/hweight.h> 392 391 #include <asm-generic/bitops/sched.h> 393 392 #include <asm-generic/bitops/le.h>
+1 -1
arch/s390/kvm/kvm-s390.c
··· 1990 1990 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); 1991 1991 while (ofs >= ms->npages && (mnode = rb_next(mnode))) { 1992 1992 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); 1993 - ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0); 1993 + ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages); 1994 1994 } 1995 1995 return ms->base_gfn + ofs; 1996 1996 }
-1
arch/sh/include/asm/bitops.h
··· 68 68 #include <asm-generic/bitops/fls64.h> 69 69 70 70 #include <asm-generic/bitops/le.h> 71 - #include <asm-generic/bitops/find.h> 72 71 73 72 #endif /* __ASM_SH_BITOPS_H */
-1
arch/sparc/include/asm/bitops_32.h
··· 100 100 #include <asm-generic/bitops/fls64.h> 101 101 #include <asm-generic/bitops/hweight.h> 102 102 #include <asm-generic/bitops/lock.h> 103 - #include <asm-generic/bitops/find.h> 104 103 #include <asm-generic/bitops/le.h> 105 104 #include <asm-generic/bitops/ext2-atomic.h> 106 105
-2
arch/sparc/include/asm/bitops_64.h
··· 52 52 #include <asm-generic/bitops/lock.h> 53 53 #endif /* __KERNEL__ */ 54 54 55 - #include <asm-generic/bitops/find.h> 56 - 57 55 #ifdef __KERNEL__ 58 56 59 57 #include <asm-generic/bitops/le.h>
-1
arch/x86/Kconfig
··· 137 137 select GENERIC_CPU_VULNERABILITIES 138 138 select GENERIC_EARLY_IOREMAP 139 139 select GENERIC_ENTRY 140 - select GENERIC_FIND_FIRST_BIT 141 140 select GENERIC_IOMAP 142 141 select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP 143 142 select GENERIC_IRQ_MATRIX_ALLOCATOR if X86_LOCAL_APIC
-2
arch/x86/include/asm/bitops.h
··· 380 380 #include <asm-generic/bitops/fls64.h> 381 381 #endif 382 382 383 - #include <asm-generic/bitops/find.h> 384 - 385 383 #include <asm-generic/bitops/sched.h> 386 384 387 385 #include <asm/arch_hweight.h>
+2 -2
arch/x86/kernel/apic/vector.c
··· 760 760 761 761 void __init lapic_assign_system_vectors(void) 762 762 { 763 - unsigned int i, vector = 0; 763 + unsigned int i, vector; 764 764 765 - for_each_set_bit_from(vector, system_vectors, NR_VECTORS) 765 + for_each_set_bit(vector, system_vectors, NR_VECTORS) 766 766 irq_matrix_assign_system(vector_matrix, vector, false); 767 767 768 768 if (nr_legacy_irqs() > 1)
-1
arch/x86/um/Kconfig
··· 8 8 9 9 config UML_X86 10 10 def_bool y 11 - select GENERIC_FIND_FIRST_BIT 12 11 13 12 config 64BIT 14 13 bool "64-bit kernel" if "$(SUBARCH)" = "x86"
-1
arch/xtensa/include/asm/bitops.h
··· 205 205 #undef BIT_OP 206 206 #undef TEST_AND_BIT_OP 207 207 208 - #include <asm-generic/bitops/find.h> 209 208 #include <asm-generic/bitops/le.h> 210 209 211 210 #include <asm-generic/bitops/ext2-atomic-setbit.h>
+1 -1
block/blk-mq.c
··· 3285 3285 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 3286 3286 struct blk_mq_hw_ctx *hctx) 3287 3287 { 3288 - if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu) 3288 + if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu) 3289 3289 return false; 3290 3290 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 3291 3291 return false;
+1 -1
drivers/block/rnbd/rnbd-clt.c
··· 196 196 return per_cpu_ptr(sess->cpu_queues, bit); 197 197 } else if (cpu != 0) { 198 198 /* Search from 0 to cpu */ 199 - bit = find_next_bit(sess->cpu_queues_bm, cpu, 0); 199 + bit = find_first_bit(sess->cpu_queues_bm, cpu); 200 200 if (bit < cpu) 201 201 return per_cpu_ptr(sess->cpu_queues, bit); 202 202 }
+2 -2
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 1047 1047 1048 1048 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) 1049 1049 { 1050 - unsigned int i = 0; 1050 + unsigned int i; 1051 1051 1052 1052 dev_err(gpu->dev, "recover hung GPU!\n"); 1053 1053 ··· 1060 1060 1061 1061 /* complete all events, the GPU won't do it after the reset */ 1062 1062 spin_lock(&gpu->event_spinlock); 1063 - for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) 1063 + for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS) 1064 1064 complete(&gpu->event_free); 1065 1065 bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS); 1066 1066 spin_unlock(&gpu->event_spinlock);
+1 -2
drivers/hwmon/ltc2992.c
··· 248 248 249 249 gpio_status = reg; 250 250 251 - gpio_nr = 0; 252 - for_each_set_bit_from(gpio_nr, mask, LTC2992_GPIO_NR) { 251 + for_each_set_bit(gpio_nr, mask, LTC2992_GPIO_NR) { 253 252 if (test_bit(LTC2992_GPIO_BIT(gpio_nr), &gpio_status)) 254 253 set_bit(gpio_nr, bits); 255 254 }
+1 -1
drivers/iio/adc/ad7124.c
··· 347 347 { 348 348 unsigned int free_cfg_slot; 349 349 350 - free_cfg_slot = find_next_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS, 0); 350 + free_cfg_slot = find_first_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS); 351 351 if (free_cfg_slot == AD7124_MAX_CONFIGS) 352 352 return -1; 353 353
+8 -8
drivers/infiniband/hw/irdma/hw.c
··· 1709 1709 */ 1710 1710 static void irdma_get_used_rsrc(struct irdma_device *iwdev) 1711 1711 { 1712 - iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds, 1713 - iwdev->rf->max_pd, 0); 1714 - iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps, 1715 - iwdev->rf->max_qp, 0); 1716 - iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs, 1717 - iwdev->rf->max_cq, 0); 1718 - iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs, 1719 - iwdev->rf->max_mr, 0); 1712 + iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds, 1713 + iwdev->rf->max_pd); 1714 + iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps, 1715 + iwdev->rf->max_qp); 1716 + iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs, 1717 + iwdev->rf->max_cq); 1718 + iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs, 1719 + iwdev->rf->max_mr); 1720 1720 } 1721 1721 1722 1722 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
+1 -1
drivers/media/cec/core/cec-core.c
··· 106 106 107 107 /* Part 1: Find a free minor number */ 108 108 mutex_lock(&cec_devnode_lock); 109 - minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0); 109 + minor = find_first_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES); 110 110 if (minor == CEC_NUM_DEVICES) { 111 111 mutex_unlock(&cec_devnode_lock); 112 112 pr_err("could not get a free minor\n");
+1 -1
drivers/media/mc/mc-devnode.c
··· 217 217 218 218 /* Part 1: Find a free minor number */ 219 219 mutex_lock(&media_devnode_lock); 220 - minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0); 220 + minor = find_first_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES); 221 221 if (minor == MEDIA_NUM_DEVICES) { 222 222 mutex_unlock(&media_devnode_lock); 223 223 pr_err("could not get a free minor\n");
+1 -1
drivers/mmc/host/renesas_sdhi_core.c
··· 642 642 * is at least SH_MOBILE_SDHI_MIN_TAP_ROW probes long then use the 643 643 * center index as the tap, otherwise bail out. 644 644 */ 645 - bitmap_for_each_set_region(bitmap, rs, re, 0, taps_size) { 645 + for_each_set_bitrange(rs, re, bitmap, taps_size) { 646 646 if (re - rs > tap_cnt) { 647 647 tap_end = re; 648 648 tap_start = rs;
+1 -1
drivers/net/virtio_net.c
··· 2101 2101 stragglers = num_cpu >= vi->curr_queue_pairs ? 2102 2102 num_cpu % vi->curr_queue_pairs : 2103 2103 0; 2104 - cpu = cpumask_next(-1, cpu_online_mask); 2104 + cpu = cpumask_first(cpu_online_mask); 2105 2105 2106 2106 for (i = 0; i < vi->curr_queue_pairs; i++) { 2107 2107 group_size = stride + (i < stragglers ? 1 : 0);
+1 -1
drivers/pci/controller/dwc/pci-dra7xx.c
··· 213 213 if (!val) 214 214 return 0; 215 215 216 - pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0); 216 + pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL); 217 217 while (pos != MAX_MSI_IRQS_PER_CTRL) { 218 218 generic_handle_domain_irq(pp->irq_domain, 219 219 (index * MAX_MSI_IRQS_PER_CTRL) + pos);
+5 -5
drivers/scsi/lpfc/lpfc_sli.c
··· 17982 17982 * the driver starts at 0 each time. 17983 17983 */ 17984 17984 spin_lock_irq(&phba->hbalock); 17985 - xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 17986 - phba->sli4_hba.max_cfg_param.max_xri, 0); 17985 + xri = find_first_zero_bit(phba->sli4_hba.xri_bmask, 17986 + phba->sli4_hba.max_cfg_param.max_xri); 17987 17987 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 17988 17988 spin_unlock_irq(&phba->hbalock); 17989 17989 return NO_XRI; ··· 19660 19660 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 19661 19661 rpi_limit = phba->sli4_hba.next_rpi; 19662 19662 19663 - rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 19663 + rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit); 19664 19664 if (rpi >= rpi_limit) 19665 19665 rpi = LPFC_RPI_ALLOC_ERROR; 19666 19666 else { ··· 20303 20303 * have been tested so that we can detect when we should 20304 20304 * change the priority level. 20305 20305 */ 20306 - next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 20307 - LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 20306 + next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask, 20307 + LPFC_SLI4_FCF_TBL_INDX_MAX); 20308 20308 } 20309 20309 20310 20310
+1 -1
drivers/soc/fsl/qbman/bman_portal.c
··· 155 155 } 156 156 157 157 spin_lock(&bman_lock); 158 - cpu = cpumask_next_zero(-1, &portal_cpus); 158 + cpu = cpumask_first_zero(&portal_cpus); 159 159 if (cpu >= nr_cpu_ids) { 160 160 __bman_portals_probed = 1; 161 161 /* unassigned portal, skip init */
+1 -1
drivers/soc/fsl/qbman/qman_portal.c
··· 248 248 pcfg->pools = qm_get_pools_sdqcr(); 249 249 250 250 spin_lock(&qman_lock); 251 - cpu = cpumask_next_zero(-1, &portal_cpus); 251 + cpu = cpumask_first_zero(&portal_cpus); 252 252 if (cpu >= nr_cpu_ids) { 253 253 __qman_portals_probed = 1; 254 254 /* unassigned portal, skip init */
+2 -2
drivers/soc/ti/k3-ringacc.c
··· 358 358 goto out; 359 359 360 360 if (flags & K3_RINGACC_RING_USE_PROXY) { 361 - proxy_id = find_next_zero_bit(ringacc->proxy_inuse, 362 - ringacc->num_proxies, 0); 361 + proxy_id = find_first_zero_bit(ringacc->proxy_inuse, 362 + ringacc->num_proxies); 363 363 if (proxy_id == ringacc->num_proxies) 364 364 goto error; 365 365 }
+1 -1
drivers/tty/n_tty.c
··· 1938 1938 more = n - (size - tail); 1939 1939 if (eol == N_TTY_BUF_SIZE && more) { 1940 1940 /* scan wrapped without finding set bit */ 1941 - eol = find_next_bit(ldata->read_flags, more, 0); 1941 + eol = find_first_bit(ldata->read_flags, more); 1942 1942 found = eol != more; 1943 1943 } else 1944 1944 found = eol != size;
+1 -2
drivers/virt/acrn/ioreq.c
··· 246 246 spin_lock_bh(&vm->ioreq_clients_lock); 247 247 client = vm->default_client; 248 248 if (client) { 249 - vcpu = find_next_bit(client->ioreqs_map, 250 - ACRN_IO_REQUEST_MAX, 0); 249 + vcpu = find_first_bit(client->ioreqs_map, ACRN_IO_REQUEST_MAX); 251 250 while (vcpu < ACRN_IO_REQUEST_MAX) { 252 251 acrn_ioreq_complete_request(client, vcpu, NULL); 253 252 vcpu = find_next_bit(client->ioreqs_map,
+4 -4
fs/f2fs/segment.c
··· 2555 2555 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); 2556 2556 if (secno >= MAIN_SECS(sbi)) { 2557 2557 if (dir == ALLOC_RIGHT) { 2558 - secno = find_next_zero_bit(free_i->free_secmap, 2559 - MAIN_SECS(sbi), 0); 2558 + secno = find_first_zero_bit(free_i->free_secmap, 2559 + MAIN_SECS(sbi)); 2560 2560 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi)); 2561 2561 } else { 2562 2562 go_left = 1; ··· 2571 2571 left_start--; 2572 2572 continue; 2573 2573 } 2574 - left_start = find_next_zero_bit(free_i->free_secmap, 2575 - MAIN_SECS(sbi), 0); 2574 + left_start = find_first_zero_bit(free_i->free_secmap, 2575 + MAIN_SECS(sbi)); 2576 2576 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi)); 2577 2577 break; 2578 2578 }
+1 -1
fs/ocfs2/cluster/heartbeat.c
··· 379 379 380 380 o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap)); 381 381 /* lowest node as master node to make negotiate decision. */ 382 - master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0); 382 + master_node = find_first_bit(live_node_bitmap, O2NM_MAX_NODES); 383 383 384 384 if (master_node == o2nm_this_node()) { 385 385 if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
+2 -2
fs/ocfs2/dlm/dlmdomain.c
··· 1045 1045 int status, ret = 0, i; 1046 1046 char *p; 1047 1047 1048 - if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES) 1048 + if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES) 1049 1049 goto bail; 1050 1050 1051 1051 qr = kzalloc(sizeof(struct dlm_query_region), GFP_KERNEL); ··· 1217 1217 struct o2nm_node *node; 1218 1218 int ret = 0, status, count, i; 1219 1219 1220 - if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES) 1220 + if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES) 1221 1221 goto bail; 1222 1222 1223 1223 qn = kzalloc(sizeof(struct dlm_query_nodeinfo), GFP_KERNEL);
+9 -9
fs/ocfs2/dlm/dlmmaster.c
··· 861 861 * to see if there are any nodes that still need to be 862 862 * considered. these will not appear in the mle nodemap 863 863 * but they might own this lockres. wait on them. */ 864 - bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 864 + bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); 865 865 if (bit < O2NM_MAX_NODES) { 866 866 mlog(0, "%s: res %.*s, At least one node (%d) " 867 867 "to recover before lock mastery can begin\n", ··· 912 912 dlm_wait_for_recovery(dlm); 913 913 914 914 spin_lock(&dlm->spinlock); 915 - bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 915 + bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); 916 916 if (bit < O2NM_MAX_NODES) { 917 917 mlog(0, "%s: res %.*s, At least one node (%d) " 918 918 "to recover before lock mastery can begin\n", ··· 1079 1079 sleep = 1; 1080 1080 /* have all nodes responded? */ 1081 1081 if (voting_done && !*blocked) { 1082 - bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 1082 + bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); 1083 1083 if (dlm->node_num <= bit) { 1084 1084 /* my node number is lowest. 1085 1085 * now tell other nodes that I am ··· 1234 1234 } else { 1235 1235 mlog(ML_ERROR, "node down! %d\n", node); 1236 1236 if (blocked) { 1237 - int lowest = find_next_bit(mle->maybe_map, 1238 - O2NM_MAX_NODES, 0); 1237 + int lowest = find_first_bit(mle->maybe_map, 1238 + O2NM_MAX_NODES); 1239 1239 1240 1240 /* act like it was never there */ 1241 1241 clear_bit(node, mle->maybe_map); ··· 1795 1795 "MLE for it! (%.*s)\n", assert->node_idx, 1796 1796 namelen, name); 1797 1797 } else { 1798 - int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); 1798 + int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); 1799 1799 if (bit >= O2NM_MAX_NODES) { 1800 1800 /* not necessarily an error, though less likely. 1801 1801 * could be master just re-asserting. 
*/ ··· 2521 2521 } 2522 2522 2523 2523 if (!nonlocal) { 2524 - node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 2524 + node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES); 2525 2525 if (node_ref >= O2NM_MAX_NODES) 2526 2526 return 0; 2527 2527 } ··· 3303 3303 BUG_ON(mle->type != DLM_MLE_BLOCK); 3304 3304 3305 3305 spin_lock(&mle->spinlock); 3306 - bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); 3306 + bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); 3307 3307 if (bit != dead_node) { 3308 3308 mlog(0, "mle found, but dead node %u would not have been " 3309 3309 "master\n", dead_node); ··· 3542 3542 spin_lock(&dlm->master_lock); 3543 3543 3544 3544 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); 3545 - BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); 3545 + BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES)); 3546 3546 3547 3547 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 3548 3548 bucket = dlm_master_hash(dlm, i);
+1 -1
fs/ocfs2/dlm/dlmrecovery.c
··· 451 451 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 452 452 int bit; 453 453 454 - bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0); 454 + bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); 455 455 if (bit >= O2NM_MAX_NODES || bit < 0) 456 456 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 457 457 else
+1 -1
fs/ocfs2/dlm/dlmthread.c
··· 92 92 return 0; 93 93 94 94 /* Another node has this resource with this node as the master */ 95 - bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 95 + bit = find_first_bit(res->refmap, O2NM_MAX_NODES); 96 96 if (bit < O2NM_MAX_NODES) 97 97 return 0; 98 98
-1
include/asm-generic/bitops.h
··· 20 20 #include <asm-generic/bitops/fls.h> 21 21 #include <asm-generic/bitops/__fls.h> 22 22 #include <asm-generic/bitops/fls64.h> 23 - #include <asm-generic/bitops/find.h> 24 23 25 24 #ifndef _LINUX_BITOPS_H 26 25 #error only <linux/bitops.h> can be included directly
+40 -14
include/asm-generic/bitops/find.h tools/include/linux/find.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_GENERIC_BITOPS_FIND_H_ 3 - #define _ASM_GENERIC_BITOPS_FIND_H_ 2 + #ifndef _TOOLS_LINUX_FIND_H_ 3 + #define _TOOLS_LINUX_FIND_H_ 4 + 5 + #ifndef _TOOLS_LINUX_BITMAP_H 6 + #error tools: only <linux/bitmap.h> can be included directly 7 + #endif 8 + 9 + #include <linux/bitops.h> 4 10 5 11 extern unsigned long _find_next_bit(const unsigned long *addr1, 6 12 const unsigned long *addr2, unsigned long nbits, 7 13 unsigned long start, unsigned long invert, unsigned long le); 8 14 extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size); 15 + extern unsigned long _find_first_and_bit(const unsigned long *addr1, 16 + const unsigned long *addr2, unsigned long size); 9 17 extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size); 10 18 extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size); 11 19 ··· 103 95 } 104 96 #endif 105 97 106 - #ifdef CONFIG_GENERIC_FIND_FIRST_BIT 107 - 98 + #ifndef find_first_bit 108 99 /** 109 100 * find_first_bit - find the first set bit in a memory region 110 101 * @addr: The address to start the search at ··· 123 116 124 117 return _find_first_bit(addr, size); 125 118 } 119 + #endif 126 120 121 + #ifndef find_first_and_bit 122 + /** 123 + * find_first_and_bit - find the first set bit in both memory regions 124 + * @addr1: The first address to base the search on 125 + * @addr2: The second address to base the search on 126 + * @size: The bitmap size in bits 127 + * 128 + * Returns the bit number for the next set bit 129 + * If no bits are set, returns @size. 130 + */ 131 + static inline 132 + unsigned long find_first_and_bit(const unsigned long *addr1, 133 + const unsigned long *addr2, 134 + unsigned long size) 135 + { 136 + if (small_const_nbits(size)) { 137 + unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0); 138 + 139 + return val ? 
__ffs(val) : size; 140 + } 141 + 142 + return _find_first_and_bit(addr1, addr2, size); 143 + } 144 + #endif 145 + 146 + #ifndef find_first_zero_bit 127 147 /** 128 148 * find_first_zero_bit - find the first cleared bit in a memory region 129 149 * @addr: The address to start the search at ··· 170 136 171 137 return _find_first_zero_bit(addr, size); 172 138 } 173 - #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ 174 - 175 - #ifndef find_first_bit 176 - #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) 177 139 #endif 178 - #ifndef find_first_zero_bit 179 - #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) 180 - #endif 181 - 182 - #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ 183 140 184 141 #ifndef find_last_bit 185 142 /** ··· 210 185 #define find_first_clump8(clump, bits, size) \ 211 186 find_next_clump8((clump), (bits), (size), 0) 212 187 213 - #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ 188 + 189 + #endif /*__LINUX_FIND_H_ */
-64
include/asm-generic/bitops/le.h
··· 2 2 #ifndef _ASM_GENERIC_BITOPS_LE_H_ 3 3 #define _ASM_GENERIC_BITOPS_LE_H_ 4 4 5 - #include <asm-generic/bitops/find.h> 6 5 #include <asm/types.h> 7 6 #include <asm/byteorder.h> 8 - #include <linux/swab.h> 9 7 10 8 #if defined(__LITTLE_ENDIAN) 11 9 12 10 #define BITOP_LE_SWIZZLE 0 13 11 14 - static inline unsigned long find_next_zero_bit_le(const void *addr, 15 - unsigned long size, unsigned long offset) 16 - { 17 - return find_next_zero_bit(addr, size, offset); 18 - } 19 - 20 - static inline unsigned long find_next_bit_le(const void *addr, 21 - unsigned long size, unsigned long offset) 22 - { 23 - return find_next_bit(addr, size, offset); 24 - } 25 - 26 - static inline unsigned long find_first_zero_bit_le(const void *addr, 27 - unsigned long size) 28 - { 29 - return find_first_zero_bit(addr, size); 30 - } 31 - 32 12 #elif defined(__BIG_ENDIAN) 33 13 34 14 #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) 35 15 36 - #ifndef find_next_zero_bit_le 37 - static inline 38 - unsigned long find_next_zero_bit_le(const void *addr, unsigned 39 - long size, unsigned long offset) 40 - { 41 - if (small_const_nbits(size)) { 42 - unsigned long val = *(const unsigned long *)addr; 43 - 44 - if (unlikely(offset >= size)) 45 - return size; 46 - 47 - val = swab(val) | ~GENMASK(size - 1, offset); 48 - return val == ~0UL ? size : ffz(val); 49 - } 50 - 51 - return _find_next_bit(addr, NULL, size, offset, ~0UL, 1); 52 - } 53 16 #endif 54 17 55 - #ifndef find_next_bit_le 56 - static inline 57 - unsigned long find_next_bit_le(const void *addr, unsigned 58 - long size, unsigned long offset) 59 - { 60 - if (small_const_nbits(size)) { 61 - unsigned long val = *(const unsigned long *)addr; 62 - 63 - if (unlikely(offset >= size)) 64 - return size; 65 - 66 - val = swab(val) & GENMASK(size - 1, offset); 67 - return val ? 
__ffs(val) : size; 68 - } 69 - 70 - return _find_next_bit(addr, NULL, size, offset, 0UL, 1); 71 - } 72 - #endif 73 - 74 - #ifndef find_first_zero_bit_le 75 - #define find_first_zero_bit_le(addr, size) \ 76 - find_next_zero_bit_le((addr), (size), 0) 77 - #endif 78 - 79 - #else 80 - #error "Please fix <asm/byteorder.h>" 81 - #endif 82 18 83 19 static inline int test_bit_le(int nr, const void *addr) 84 20 {
+1 -33
include/linux/bitmap.h
··· 6 6 7 7 #include <linux/align.h> 8 8 #include <linux/bitops.h> 9 + #include <linux/find.h> 9 10 #include <linux/limits.h> 10 11 #include <linux/string.h> 11 12 #include <linux/types.h> ··· 55 54 * bitmap_clear(dst, pos, nbits) Clear specified bit area 56 55 * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area 57 56 * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above 58 - * bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region 59 - * bitmap_next_set_region(map, &start, &end, nbits) Find next set region 60 - * bitmap_for_each_clear_region(map, rs, re, start, end) 61 - * Iterate over all clear regions 62 - * bitmap_for_each_set_region(map, rs, re, start, end) 63 - * Iterate over all set regions 64 57 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n 65 58 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n 66 59 * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest ··· 461 466 __bitmap_replace(dst, old, new, mask, nbits); 462 467 } 463 468 464 - static inline void bitmap_next_clear_region(unsigned long *bitmap, 465 - unsigned int *rs, unsigned int *re, 466 - unsigned int end) 467 - { 468 - *rs = find_next_zero_bit(bitmap, end, *rs); 469 - *re = find_next_bit(bitmap, end, *rs + 1); 470 - } 471 - 472 469 static inline void bitmap_next_set_region(unsigned long *bitmap, 473 470 unsigned int *rs, unsigned int *re, 474 471 unsigned int end) ··· 468 481 *rs = find_next_bit(bitmap, end, *rs); 469 482 *re = find_next_zero_bit(bitmap, end, *rs + 1); 470 483 } 471 - 472 - /* 473 - * Bitmap region iterators. Iterates over the bitmap between [@start, @end). 474 - * @rs and @re should be integer variables and will be set to start and end 475 - * index of the current clear or set region. 
476 - */ 477 - #define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \ 478 - for ((rs) = (start), \ 479 - bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \ 480 - (rs) < (re); \ 481 - (rs) = (re) + 1, \ 482 - bitmap_next_clear_region((bitmap), &(rs), &(re), (end))) 483 - 484 - #define bitmap_for_each_set_region(bitmap, rs, re, start, end) \ 485 - for ((rs) = (start), \ 486 - bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \ 487 - (rs) < (re); \ 488 - (rs) = (re) + 1, \ 489 - bitmap_next_set_region((bitmap), &(rs), &(re), (end))) 490 484 491 485 /** 492 486 * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
-34
include/linux/bitops.h
··· 32 32 */ 33 33 #include <asm/bitops.h> 34 34 35 - #define for_each_set_bit(bit, addr, size) \ 36 - for ((bit) = find_first_bit((addr), (size)); \ 37 - (bit) < (size); \ 38 - (bit) = find_next_bit((addr), (size), (bit) + 1)) 39 - 40 - /* same as for_each_set_bit() but use bit as value to start with */ 41 - #define for_each_set_bit_from(bit, addr, size) \ 42 - for ((bit) = find_next_bit((addr), (size), (bit)); \ 43 - (bit) < (size); \ 44 - (bit) = find_next_bit((addr), (size), (bit) + 1)) 45 - 46 - #define for_each_clear_bit(bit, addr, size) \ 47 - for ((bit) = find_first_zero_bit((addr), (size)); \ 48 - (bit) < (size); \ 49 - (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) 50 - 51 - /* same as for_each_clear_bit() but use bit as value to start with */ 52 - #define for_each_clear_bit_from(bit, addr, size) \ 53 - for ((bit) = find_next_zero_bit((addr), (size), (bit)); \ 54 - (bit) < (size); \ 55 - (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) 56 - 57 - /** 58 - * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits 59 - * @start: bit offset to start search and to store the current iteration offset 60 - * @clump: location to store copy of current 8-bit clump 61 - * @bits: bitmap address to base the search on 62 - * @size: bitmap size in number of bits 63 - */ 64 - #define for_each_set_clump8(start, clump, bits, size) \ 65 - for ((start) = find_first_clump8(&(clump), (bits), (size)); \ 66 - (start) < (size); \ 67 - (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8)) 68 - 69 35 static inline int get_bitmask_order(unsigned int count) 70 36 { 71 37 int order;
+36 -10
include/linux/cpumask.h
··· 123 123 return 0; 124 124 } 125 125 126 + static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) 127 + { 128 + return 0; 129 + } 130 + 131 + static inline unsigned int cpumask_first_and(const struct cpumask *srcp1, 132 + const struct cpumask *srcp2) 133 + { 134 + return 0; 135 + } 136 + 126 137 static inline unsigned int cpumask_last(const struct cpumask *srcp) 127 138 { 128 139 return 0; ··· 178 167 179 168 static inline int cpumask_any_and_distribute(const struct cpumask *src1p, 180 169 const struct cpumask *src2p) { 181 - return cpumask_next_and(-1, src1p, src2p); 170 + return cpumask_first_and(src1p, src2p); 182 171 } 183 172 184 173 static inline int cpumask_any_distribute(const struct cpumask *srcp) ··· 204 193 static inline unsigned int cpumask_first(const struct cpumask *srcp) 205 194 { 206 195 return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); 196 + } 197 + 198 + /** 199 + * cpumask_first_zero - get the first unset cpu in a cpumask 200 + * @srcp: the cpumask pointer 201 + * 202 + * Returns >= nr_cpu_ids if all cpus are set. 203 + */ 204 + static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) 205 + { 206 + return find_first_zero_bit(cpumask_bits(srcp), nr_cpumask_bits); 207 + } 208 + 209 + /** 210 + * cpumask_first_and - return the first cpu from *srcp1 & *srcp2 211 + * @src1p: the first input 212 + * @src2p: the second input 213 + * 214 + * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). 215 + */ 216 + static inline 217 + unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2) 218 + { 219 + return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits); 207 220 } 208 221 209 222 /** ··· 619 584 * Returns >= nr_cpu_ids if no cpus set. 
620 585 */ 621 586 #define cpumask_any(srcp) cpumask_first(srcp) 622 - 623 - /** 624 - * cpumask_first_and - return the first cpu from *srcp1 & *srcp2 625 - * @src1p: the first input 626 - * @src2p: the second input 627 - * 628 - * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). 629 - */ 630 - #define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p)) 631 587 632 588 /** 633 589 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+372
include/linux/find.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __LINUX_FIND_H_ 3 + #define __LINUX_FIND_H_ 4 + 5 + #ifndef __LINUX_BITMAP_H 6 + #error only <linux/bitmap.h> can be included directly 7 + #endif 8 + 9 + #include <linux/bitops.h> 10 + 11 + extern unsigned long _find_next_bit(const unsigned long *addr1, 12 + const unsigned long *addr2, unsigned long nbits, 13 + unsigned long start, unsigned long invert, unsigned long le); 14 + extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size); 15 + extern unsigned long _find_first_and_bit(const unsigned long *addr1, 16 + const unsigned long *addr2, unsigned long size); 17 + extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size); 18 + extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size); 19 + 20 + #ifndef find_next_bit 21 + /** 22 + * find_next_bit - find the next set bit in a memory region 23 + * @addr: The address to base the search on 24 + * @offset: The bitnumber to start searching at 25 + * @size: The bitmap size in bits 26 + * 27 + * Returns the bit number for the next set bit 28 + * If no bits are set, returns @size. 29 + */ 30 + static inline 31 + unsigned long find_next_bit(const unsigned long *addr, unsigned long size, 32 + unsigned long offset) 33 + { 34 + if (small_const_nbits(size)) { 35 + unsigned long val; 36 + 37 + if (unlikely(offset >= size)) 38 + return size; 39 + 40 + val = *addr & GENMASK(size - 1, offset); 41 + return val ? 
__ffs(val) : size; 42 + } 43 + 44 + return _find_next_bit(addr, NULL, size, offset, 0UL, 0); 45 + } 46 + #endif 47 + 48 + #ifndef find_next_and_bit 49 + /** 50 + * find_next_and_bit - find the next set bit in both memory regions 51 + * @addr1: The first address to base the search on 52 + * @addr2: The second address to base the search on 53 + * @offset: The bitnumber to start searching at 54 + * @size: The bitmap size in bits 55 + * 56 + * Returns the bit number for the next set bit 57 + * If no bits are set, returns @size. 58 + */ 59 + static inline 60 + unsigned long find_next_and_bit(const unsigned long *addr1, 61 + const unsigned long *addr2, unsigned long size, 62 + unsigned long offset) 63 + { 64 + if (small_const_nbits(size)) { 65 + unsigned long val; 66 + 67 + if (unlikely(offset >= size)) 68 + return size; 69 + 70 + val = *addr1 & *addr2 & GENMASK(size - 1, offset); 71 + return val ? __ffs(val) : size; 72 + } 73 + 74 + return _find_next_bit(addr1, addr2, size, offset, 0UL, 0); 75 + } 76 + #endif 77 + 78 + #ifndef find_next_zero_bit 79 + /** 80 + * find_next_zero_bit - find the next cleared bit in a memory region 81 + * @addr: The address to base the search on 82 + * @offset: The bitnumber to start searching at 83 + * @size: The bitmap size in bits 84 + * 85 + * Returns the bit number of the next zero bit 86 + * If no bits are zero, returns @size. 87 + */ 88 + static inline 89 + unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, 90 + unsigned long offset) 91 + { 92 + if (small_const_nbits(size)) { 93 + unsigned long val; 94 + 95 + if (unlikely(offset >= size)) 96 + return size; 97 + 98 + val = *addr | ~GENMASK(size - 1, offset); 99 + return val == ~0UL ? 
size : ffz(val); 100 + } 101 + 102 + return _find_next_bit(addr, NULL, size, offset, ~0UL, 0); 103 + } 104 + #endif 105 + 106 + #ifndef find_first_bit 107 + /** 108 + * find_first_bit - find the first set bit in a memory region 109 + * @addr: The address to start the search at 110 + * @size: The maximum number of bits to search 111 + * 112 + * Returns the bit number of the first set bit. 113 + * If no bits are set, returns @size. 114 + */ 115 + static inline 116 + unsigned long find_first_bit(const unsigned long *addr, unsigned long size) 117 + { 118 + if (small_const_nbits(size)) { 119 + unsigned long val = *addr & GENMASK(size - 1, 0); 120 + 121 + return val ? __ffs(val) : size; 122 + } 123 + 124 + return _find_first_bit(addr, size); 125 + } 126 + #endif 127 + 128 + #ifndef find_first_and_bit 129 + /** 130 + * find_first_and_bit - find the first set bit in both memory regions 131 + * @addr1: The first address to base the search on 132 + * @addr2: The second address to base the search on 133 + * @size: The bitmap size in bits 134 + * 135 + * Returns the bit number for the next set bit 136 + * If no bits are set, returns @size. 137 + */ 138 + static inline 139 + unsigned long find_first_and_bit(const unsigned long *addr1, 140 + const unsigned long *addr2, 141 + unsigned long size) 142 + { 143 + if (small_const_nbits(size)) { 144 + unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0); 145 + 146 + return val ? __ffs(val) : size; 147 + } 148 + 149 + return _find_first_and_bit(addr1, addr2, size); 150 + } 151 + #endif 152 + 153 + #ifndef find_first_zero_bit 154 + /** 155 + * find_first_zero_bit - find the first cleared bit in a memory region 156 + * @addr: The address to start the search at 157 + * @size: The maximum number of bits to search 158 + * 159 + * Returns the bit number of the first cleared bit. 160 + * If no bits are zero, returns @size. 
161 + */ 162 + static inline 163 + unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) 164 + { 165 + if (small_const_nbits(size)) { 166 + unsigned long val = *addr | ~GENMASK(size - 1, 0); 167 + 168 + return val == ~0UL ? size : ffz(val); 169 + } 170 + 171 + return _find_first_zero_bit(addr, size); 172 + } 173 + #endif 174 + 175 + #ifndef find_last_bit 176 + /** 177 + * find_last_bit - find the last set bit in a memory region 178 + * @addr: The address to start the search at 179 + * @size: The number of bits to search 180 + * 181 + * Returns the bit number of the last set bit, or size. 182 + */ 183 + static inline 184 + unsigned long find_last_bit(const unsigned long *addr, unsigned long size) 185 + { 186 + if (small_const_nbits(size)) { 187 + unsigned long val = *addr & GENMASK(size - 1, 0); 188 + 189 + return val ? __fls(val) : size; 190 + } 191 + 192 + return _find_last_bit(addr, size); 193 + } 194 + #endif 195 + 196 + /** 197 + * find_next_clump8 - find next 8-bit clump with set bits in a memory region 198 + * @clump: location to store copy of found clump 199 + * @addr: address to base the search on 200 + * @size: bitmap size in number of bits 201 + * @offset: bit offset at which to start searching 202 + * 203 + * Returns the bit offset for the next set clump; the found clump value is 204 + * copied to the location pointed by @clump. If no bits are set, returns @size. 
205 + */ 206 + extern unsigned long find_next_clump8(unsigned long *clump, 207 + const unsigned long *addr, 208 + unsigned long size, unsigned long offset); 209 + 210 + #define find_first_clump8(clump, bits, size) \ 211 + find_next_clump8((clump), (bits), (size), 0) 212 + 213 + #if defined(__LITTLE_ENDIAN) 214 + 215 + static inline unsigned long find_next_zero_bit_le(const void *addr, 216 + unsigned long size, unsigned long offset) 217 + { 218 + return find_next_zero_bit(addr, size, offset); 219 + } 220 + 221 + static inline unsigned long find_next_bit_le(const void *addr, 222 + unsigned long size, unsigned long offset) 223 + { 224 + return find_next_bit(addr, size, offset); 225 + } 226 + 227 + static inline unsigned long find_first_zero_bit_le(const void *addr, 228 + unsigned long size) 229 + { 230 + return find_first_zero_bit(addr, size); 231 + } 232 + 233 + #elif defined(__BIG_ENDIAN) 234 + 235 + #ifndef find_next_zero_bit_le 236 + static inline 237 + unsigned long find_next_zero_bit_le(const void *addr, unsigned 238 + long size, unsigned long offset) 239 + { 240 + if (small_const_nbits(size)) { 241 + unsigned long val = *(const unsigned long *)addr; 242 + 243 + if (unlikely(offset >= size)) 244 + return size; 245 + 246 + val = swab(val) | ~GENMASK(size - 1, offset); 247 + return val == ~0UL ? size : ffz(val); 248 + } 249 + 250 + return _find_next_bit(addr, NULL, size, offset, ~0UL, 1); 251 + } 252 + #endif 253 + 254 + #ifndef find_next_bit_le 255 + static inline 256 + unsigned long find_next_bit_le(const void *addr, unsigned 257 + long size, unsigned long offset) 258 + { 259 + if (small_const_nbits(size)) { 260 + unsigned long val = *(const unsigned long *)addr; 261 + 262 + if (unlikely(offset >= size)) 263 + return size; 264 + 265 + val = swab(val) & GENMASK(size - 1, offset); 266 + return val ? 
__ffs(val) : size; 267 + } 268 + 269 + return _find_next_bit(addr, NULL, size, offset, 0UL, 1); 270 + } 271 + #endif 272 + 273 + #ifndef find_first_zero_bit_le 274 + #define find_first_zero_bit_le(addr, size) \ 275 + find_next_zero_bit_le((addr), (size), 0) 276 + #endif 277 + 278 + #else 279 + #error "Please fix <asm/byteorder.h>" 280 + #endif 281 + 282 + #define for_each_set_bit(bit, addr, size) \ 283 + for ((bit) = find_next_bit((addr), (size), 0); \ 284 + (bit) < (size); \ 285 + (bit) = find_next_bit((addr), (size), (bit) + 1)) 286 + 287 + /* same as for_each_set_bit() but use bit as value to start with */ 288 + #define for_each_set_bit_from(bit, addr, size) \ 289 + for ((bit) = find_next_bit((addr), (size), (bit)); \ 290 + (bit) < (size); \ 291 + (bit) = find_next_bit((addr), (size), (bit) + 1)) 292 + 293 + #define for_each_clear_bit(bit, addr, size) \ 294 + for ((bit) = find_next_zero_bit((addr), (size), 0); \ 295 + (bit) < (size); \ 296 + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) 297 + 298 + /* same as for_each_clear_bit() but use bit as value to start with */ 299 + #define for_each_clear_bit_from(bit, addr, size) \ 300 + for ((bit) = find_next_zero_bit((addr), (size), (bit)); \ 301 + (bit) < (size); \ 302 + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) 303 + 304 + /** 305 + * for_each_set_bitrange - iterate over all set bit ranges [b; e) 306 + * @b: bit offset of start of current bitrange (first set bit) 307 + * @e: bit offset of end of current bitrange (first unset bit) 308 + * @addr: bitmap address to base the search on 309 + * @size: bitmap size in number of bits 310 + */ 311 + #define for_each_set_bitrange(b, e, addr, size) \ 312 + for ((b) = find_next_bit((addr), (size), 0), \ 313 + (e) = find_next_zero_bit((addr), (size), (b) + 1); \ 314 + (b) < (size); \ 315 + (b) = find_next_bit((addr), (size), (e) + 1), \ 316 + (e) = find_next_zero_bit((addr), (size), (b) + 1)) 317 + 318 + /** 319 + * for_each_set_bitrange_from - iterate over 
all set bit ranges [b; e) 320 + * @b: bit offset of start of current bitrange (first set bit); must be initialized 321 + * @e: bit offset of end of current bitrange (first unset bit) 322 + * @addr: bitmap address to base the search on 323 + * @size: bitmap size in number of bits 324 + */ 325 + #define for_each_set_bitrange_from(b, e, addr, size) \ 326 + for ((b) = find_next_bit((addr), (size), (b)), \ 327 + (e) = find_next_zero_bit((addr), (size), (b) + 1); \ 328 + (b) < (size); \ 329 + (b) = find_next_bit((addr), (size), (e) + 1), \ 330 + (e) = find_next_zero_bit((addr), (size), (b) + 1)) 331 + 332 + /** 333 + * for_each_clear_bitrange - iterate over all unset bit ranges [b; e) 334 + * @b: bit offset of start of current bitrange (first unset bit) 335 + * @e: bit offset of end of current bitrange (first set bit) 336 + * @addr: bitmap address to base the search on 337 + * @size: bitmap size in number of bits 338 + */ 339 + #define for_each_clear_bitrange(b, e, addr, size) \ 340 + for ((b) = find_next_zero_bit((addr), (size), 0), \ 341 + (e) = find_next_bit((addr), (size), (b) + 1); \ 342 + (b) < (size); \ 343 + (b) = find_next_zero_bit((addr), (size), (e) + 1), \ 344 + (e) = find_next_bit((addr), (size), (b) + 1)) 345 + 346 + /** 347 + * for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e) 348 + * @b: bit offset of start of current bitrange (first unset bit); must be initialized 349 + * @e: bit offset of end of current bitrange (first set bit) 350 + * @addr: bitmap address to base the search on 351 + * @size: bitmap size in number of bits 352 + */ 353 + #define for_each_clear_bitrange_from(b, e, addr, size) \ 354 + for ((b) = find_next_zero_bit((addr), (size), (b)), \ 355 + (e) = find_next_bit((addr), (size), (b) + 1); \ 356 + (b) < (size); \ 357 + (b) = find_next_zero_bit((addr), (size), (e) + 1), \ 358 + (e) = find_next_bit((addr), (size), (b) + 1)) 359 + 360 + /** 361 + * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set 
bits 362 + * @start: bit offset to start search and to store the current iteration offset 363 + * @clump: location to store copy of current 8-bit clump 364 + * @bits: bitmap address to base the search on 365 + * @size: bitmap size in number of bits 366 + */ 367 + #define for_each_set_clump8(start, clump, bits, size) \ 368 + for ((start) = find_first_clump8(&(clump), (bits), (size)); \ 369 + (start) < (size); \ 370 + (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8)) 371 + 372 + #endif /*__LINUX_FIND_H_ */
+2 -2
kernel/time/clocksource.c
··· 285 285 return; 286 286 287 287 /* Make sure to select at least one CPU other than the current CPU. */ 288 - cpu = cpumask_next(-1, cpu_online_mask); 288 + cpu = cpumask_first(cpu_online_mask); 289 289 if (cpu == smp_processor_id()) 290 290 cpu = cpumask_next(cpu, cpu_online_mask); 291 291 if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) ··· 307 307 cpu = prandom_u32() % nr_cpu_ids; 308 308 cpu = cpumask_next(cpu - 1, cpu_online_mask); 309 309 if (cpu >= nr_cpu_ids) 310 - cpu = cpumask_next(-1, cpu_online_mask); 310 + cpu = cpumask_first(cpu_online_mask); 311 311 if (!WARN_ON_ONCE(cpu >= nr_cpu_ids)) 312 312 cpumask_set_cpu(cpu, &cpus_chosen); 313 313 }
-3
lib/Kconfig
··· 65 65 config GENERIC_NET_UTILS 66 66 bool 67 67 68 - config GENERIC_FIND_FIRST_BIT 69 - bool 70 - 71 68 source "lib/math/Kconfig" 72 69 73 70 config NO_GENERIC_PCI_IOPORT_MAP
+21
lib/find_bit.c
··· 89 89 EXPORT_SYMBOL(_find_first_bit); 90 90 #endif 91 91 92 + #ifndef find_first_and_bit 93 + /* 94 + * Find the first set bit in two memory regions. 95 + */ 96 + unsigned long _find_first_and_bit(const unsigned long *addr1, 97 + const unsigned long *addr2, 98 + unsigned long size) 99 + { 100 + unsigned long idx, val; 101 + 102 + for (idx = 0; idx * BITS_PER_LONG < size; idx++) { 103 + val = addr1[idx] & addr2[idx]; 104 + if (val) 105 + return min(idx * BITS_PER_LONG + __ffs(val), size); 106 + } 107 + 108 + return size; 109 + } 110 + EXPORT_SYMBOL(_find_first_and_bit); 111 + #endif 112 + 92 113 #ifndef find_first_zero_bit 93 114 /* 94 115 * Find the first cleared bit in a memory region.
+21
lib/find_bit_benchmark.c
··· 49 49 return 0; 50 50 } 51 51 52 + static int __init test_find_first_and_bit(void *bitmap, const void *bitmap2, unsigned long len) 53 + { 54 + static DECLARE_BITMAP(cp, BITMAP_LEN) __initdata; 55 + unsigned long i, cnt; 56 + ktime_t time; 57 + 58 + bitmap_copy(cp, bitmap, BITMAP_LEN); 59 + 60 + time = ktime_get(); 61 + for (cnt = i = 0; i < len; cnt++) { 62 + i = find_first_and_bit(cp, bitmap2, len); 63 + __clear_bit(i, cp); 64 + } 65 + time = ktime_get() - time; 66 + pr_err("find_first_and_bit: %18llu ns, %6ld iterations\n", time, cnt); 67 + 68 + return 0; 69 + } 70 + 52 71 static int __init test_find_next_bit(const void *bitmap, unsigned long len) 53 72 { 54 73 unsigned long i, cnt; ··· 148 129 * traverse only part of bitmap to avoid soft lockup. 149 130 */ 150 131 test_find_first_bit(bitmap, BITMAP_LEN / 10); 132 + test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN / 2); 151 133 test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); 152 134 153 135 pr_err("\nStart testing find_bit() with sparse bitmap\n"); ··· 165 145 test_find_next_zero_bit(bitmap, BITMAP_LEN); 166 146 test_find_last_bit(bitmap, BITMAP_LEN); 167 147 test_find_first_bit(bitmap, BITMAP_LEN); 148 + test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN); 168 149 test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); 169 150 170 151 /*
+1 -1
lib/genalloc.c
··· 251 251 list_del(&chunk->next_chunk); 252 252 253 253 end_bit = chunk_size(chunk) >> order; 254 - bit = find_next_bit(chunk->bits, end_bit, 0); 254 + bit = find_first_bit(chunk->bits, end_bit); 255 255 BUG_ON(bit < end_bit); 256 256 257 257 vfree(chunk);
+37
lib/test_bitmap.c
··· 446 446 } 447 447 } 448 448 449 + static void __init test_bitmap_printlist(void) 450 + { 451 + unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL); 452 + char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 453 + char expected[256]; 454 + int ret, slen; 455 + ktime_t time; 456 + 457 + if (!buf || !bmap) 458 + goto out; 459 + 460 + memset(bmap, -1, PAGE_SIZE); 461 + slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1); 462 + if (slen < 0) 463 + goto out; 464 + 465 + time = ktime_get(); 466 + ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8); 467 + time = ktime_get() - time; 468 + 469 + if (ret != slen + 1) { 470 + pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen); 471 + goto out; 472 + } 473 + 474 + if (strncmp(buf, expected, slen)) { 475 + pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected); 476 + goto out; 477 + } 478 + 479 + pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); 480 + out: 481 + kfree(buf); 482 + kfree(bmap); 483 + } 484 + 449 485 static const unsigned long parse_test[] __initconst = { 450 486 BITMAP_FROM_U64(0), 451 487 BITMAP_FROM_U64(1), ··· 854 818 test_bitmap_arr32(); 855 819 test_bitmap_parse(); 856 820 test_bitmap_parselist(); 821 + test_bitmap_printlist(); 857 822 test_mem_optimisations(); 858 823 test_for_each_set_clump8(); 859 824 test_bitmap_cut();
+7 -17
lib/vsprintf.c
··· 1241 1241 struct printf_spec spec, const char *fmt) 1242 1242 { 1243 1243 int nr_bits = max_t(int, spec.field_width, 0); 1244 - /* current bit is 'cur', most recently seen range is [rbot, rtop] */ 1245 - int cur, rbot, rtop; 1246 1244 bool first = true; 1245 + int rbot, rtop; 1247 1246 1248 1247 if (check_pointer(&buf, end, bitmap, spec)) 1249 1248 return buf; 1250 1249 1251 - rbot = cur = find_first_bit(bitmap, nr_bits); 1252 - while (cur < nr_bits) { 1253 - rtop = cur; 1254 - cur = find_next_bit(bitmap, nr_bits, cur + 1); 1255 - if (cur < nr_bits && cur <= rtop + 1) 1256 - continue; 1257 - 1250 + for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) { 1258 1251 if (!first) { 1259 1252 if (buf < end) 1260 1253 *buf = ','; ··· 1256 1263 first = false; 1257 1264 1258 1265 buf = number(buf, end, rbot, default_dec_spec); 1259 - if (rbot < rtop) { 1260 - if (buf < end) 1261 - *buf = '-'; 1262 - buf++; 1266 + if (rtop == rbot + 1) 1267 + continue; 1263 1268 1264 - buf = number(buf, end, rtop, default_dec_spec); 1265 - } 1266 - 1267 - rbot = cur; 1269 + if (buf < end) 1270 + *buf = '-'; 1271 + buf = number(++buf, end, rtop - 1, default_dec_spec); 1268 1272 } 1269 1273 return buf; 1270 1274 }
+16 -19
mm/percpu.c
··· 779 779 { 780 780 struct pcpu_block_md *block = chunk->md_blocks + index; 781 781 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); 782 - unsigned int rs, re, start; /* region start, region end */ 782 + unsigned int start, end; /* region start, region end */ 783 783 784 784 /* promote scan_hint to contig_hint */ 785 785 if (block->scan_hint) { ··· 795 795 block->right_free = 0; 796 796 797 797 /* iterate over free areas and update the contig hints */ 798 - bitmap_for_each_clear_region(alloc_map, rs, re, start, 799 - PCPU_BITMAP_BLOCK_BITS) 800 - pcpu_block_update(block, rs, re); 798 + for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS) 799 + pcpu_block_update(block, start, end); 801 800 } 802 801 803 802 /** ··· 1069 1070 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, 1070 1071 int *next_off) 1071 1072 { 1072 - unsigned int page_start, page_end, rs, re; 1073 + unsigned int start, end; 1073 1074 1074 - page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); 1075 - page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); 1075 + start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); 1076 + end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); 1076 1077 1077 - rs = page_start; 1078 - bitmap_next_clear_region(chunk->populated, &rs, &re, page_end); 1079 - if (rs >= page_end) 1078 + start = find_next_zero_bit(chunk->populated, end, start); 1079 + if (start >= end) 1080 1080 return true; 1081 1081 1082 - *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE; 1082 + end = find_next_bit(chunk->populated, end, start + 1); 1083 + 1084 + *next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE; 1083 1085 return false; 1084 1086 } 1085 1087 ··· 1851 1851 1852 1852 /* populate if not all pages are already there */ 1853 1853 if (!is_atomic) { 1854 - unsigned int page_start, page_end, rs, re; 1854 + unsigned int page_end, rs, re; 1855 1855 1856 - page_start = PFN_DOWN(off); 1856 + rs = PFN_DOWN(off); 1857 1857 page_end = 
PFN_UP(off + size); 1858 1858 1859 - bitmap_for_each_clear_region(chunk->populated, rs, re, 1860 - page_start, page_end) { 1859 + for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) { 1861 1860 WARN_ON(chunk->immutable); 1862 1861 1863 1862 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); ··· 2012 2013 list_for_each_entry_safe(chunk, next, &to_free, list) { 2013 2014 unsigned int rs, re; 2014 2015 2015 - bitmap_for_each_set_region(chunk->populated, rs, re, 0, 2016 - chunk->nr_pages) { 2016 + for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) { 2017 2017 pcpu_depopulate_chunk(chunk, rs, re); 2018 2018 spin_lock_irq(&pcpu_lock); 2019 2019 pcpu_chunk_depopulated(chunk, rs, re); ··· 2082 2084 continue; 2083 2085 2084 2086 /* @chunk can't go away while pcpu_alloc_mutex is held */ 2085 - bitmap_for_each_clear_region(chunk->populated, rs, re, 0, 2086 - chunk->nr_pages) { 2087 + for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) { 2087 2088 int nr = min_t(int, re - rs, nr_to_pop); 2088 2089 2089 2090 spin_unlock_irq(&pcpu_lock);
+2 -2
net/ncsi/ncsi-manage.c
··· 608 608 bitmap = &ncf->bitmap; 609 609 610 610 spin_lock_irqsave(&nc->lock, flags); 611 - index = find_next_bit(bitmap, ncf->n_vids, 0); 611 + index = find_first_bit(bitmap, ncf->n_vids); 612 612 if (index >= ncf->n_vids) { 613 613 spin_unlock_irqrestore(&nc->lock, flags); 614 614 return -1; ··· 667 667 return -1; 668 668 } 669 669 670 - index = find_next_zero_bit(bitmap, ncf->n_vids, 0); 670 + index = find_first_zero_bit(bitmap, ncf->n_vids); 671 671 if (index < 0 || index >= ncf->n_vids) { 672 672 netdev_err(ndp->ndev.dev, 673 673 "Channel %u already has all VLAN filters set\n",
-1
tools/include/asm-generic/bitops.h
··· 18 18 #include <asm-generic/bitops/fls.h> 19 19 #include <asm-generic/bitops/__fls.h> 20 20 #include <asm-generic/bitops/fls64.h> 21 - #include <asm-generic/bitops/find.h> 22 21 23 22 #ifndef _TOOLS_LINUX_BITOPS_H_ 24 23 #error only <linux/bitops.h> can be included directly
-145
tools/include/asm-generic/bitops/find.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ 3 - #define _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ 4 - 5 - extern unsigned long _find_next_bit(const unsigned long *addr1, 6 - const unsigned long *addr2, unsigned long nbits, 7 - unsigned long start, unsigned long invert, unsigned long le); 8 - extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size); 9 - extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size); 10 - extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size); 11 - 12 - #ifndef find_next_bit 13 - /** 14 - * find_next_bit - find the next set bit in a memory region 15 - * @addr: The address to base the search on 16 - * @offset: The bitnumber to start searching at 17 - * @size: The bitmap size in bits 18 - * 19 - * Returns the bit number for the next set bit 20 - * If no bits are set, returns @size. 21 - */ 22 - static inline 23 - unsigned long find_next_bit(const unsigned long *addr, unsigned long size, 24 - unsigned long offset) 25 - { 26 - if (small_const_nbits(size)) { 27 - unsigned long val; 28 - 29 - if (unlikely(offset >= size)) 30 - return size; 31 - 32 - val = *addr & GENMASK(size - 1, offset); 33 - return val ? __ffs(val) : size; 34 - } 35 - 36 - return _find_next_bit(addr, NULL, size, offset, 0UL, 0); 37 - } 38 - #endif 39 - 40 - #ifndef find_next_and_bit 41 - /** 42 - * find_next_and_bit - find the next set bit in both memory regions 43 - * @addr1: The first address to base the search on 44 - * @addr2: The second address to base the search on 45 - * @offset: The bitnumber to start searching at 46 - * @size: The bitmap size in bits 47 - * 48 - * Returns the bit number for the next set bit 49 - * If no bits are set, returns @size. 
50 - */ 51 - static inline 52 - unsigned long find_next_and_bit(const unsigned long *addr1, 53 - const unsigned long *addr2, unsigned long size, 54 - unsigned long offset) 55 - { 56 - if (small_const_nbits(size)) { 57 - unsigned long val; 58 - 59 - if (unlikely(offset >= size)) 60 - return size; 61 - 62 - val = *addr1 & *addr2 & GENMASK(size - 1, offset); 63 - return val ? __ffs(val) : size; 64 - } 65 - 66 - return _find_next_bit(addr1, addr2, size, offset, 0UL, 0); 67 - } 68 - #endif 69 - 70 - #ifndef find_next_zero_bit 71 - /** 72 - * find_next_zero_bit - find the next cleared bit in a memory region 73 - * @addr: The address to base the search on 74 - * @offset: The bitnumber to start searching at 75 - * @size: The bitmap size in bits 76 - * 77 - * Returns the bit number of the next zero bit 78 - * If no bits are zero, returns @size. 79 - */ 80 - static inline 81 - unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, 82 - unsigned long offset) 83 - { 84 - if (small_const_nbits(size)) { 85 - unsigned long val; 86 - 87 - if (unlikely(offset >= size)) 88 - return size; 89 - 90 - val = *addr | ~GENMASK(size - 1, offset); 91 - return val == ~0UL ? size : ffz(val); 92 - } 93 - 94 - return _find_next_bit(addr, NULL, size, offset, ~0UL, 0); 95 - } 96 - #endif 97 - 98 - #ifndef find_first_bit 99 - 100 - /** 101 - * find_first_bit - find the first set bit in a memory region 102 - * @addr: The address to start the search at 103 - * @size: The maximum number of bits to search 104 - * 105 - * Returns the bit number of the first set bit. 106 - * If no bits are set, returns @size. 107 - */ 108 - static inline 109 - unsigned long find_first_bit(const unsigned long *addr, unsigned long size) 110 - { 111 - if (small_const_nbits(size)) { 112 - unsigned long val = *addr & GENMASK(size - 1, 0); 113 - 114 - return val ? 
__ffs(val) : size; 115 - } 116 - 117 - return _find_first_bit(addr, size); 118 - } 119 - 120 - #endif /* find_first_bit */ 121 - 122 - #ifndef find_first_zero_bit 123 - 124 - /** 125 - * find_first_zero_bit - find the first cleared bit in a memory region 126 - * @addr: The address to start the search at 127 - * @size: The maximum number of bits to search 128 - * 129 - * Returns the bit number of the first cleared bit. 130 - * If no bits are zero, returns @size. 131 - */ 132 - static inline 133 - unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) 134 - { 135 - if (small_const_nbits(size)) { 136 - unsigned long val = *addr | ~GENMASK(size - 1, 0); 137 - 138 - return val == ~0UL ? size : ffz(val); 139 - } 140 - 141 - return _find_first_zero_bit(addr, size); 142 - } 143 - #endif 144 - 145 - #endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */
+4 -3
tools/include/linux/bitmap.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _PERF_BITOPS_H 3 - #define _PERF_BITOPS_H 2 + #ifndef _TOOLS_LINUX_BITMAP_H 3 + #define _TOOLS_LINUX_BITMAP_H 4 4 5 5 #include <string.h> 6 6 #include <linux/bitops.h> 7 + #include <linux/find.h> 7 8 #include <stdlib.h> 8 9 #include <linux/kernel.h> 9 10 ··· 182 181 return __bitmap_intersects(src1, src2, nbits); 183 182 } 184 183 185 - #endif /* _PERF_BITOPS_H */ 184 + #endif /* _TOOLS_LINUX_BITMAP_H */
+20
tools/lib/find_bit.c
··· 96 96 } 97 97 #endif 98 98 99 + #ifndef find_first_and_bit 100 + /* 101 + * Find the first set bit in two memory regions. 102 + */ 103 + unsigned long _find_first_and_bit(const unsigned long *addr1, 104 + const unsigned long *addr2, 105 + unsigned long size) 106 + { 107 + unsigned long idx, val; 108 + 109 + for (idx = 0; idx * BITS_PER_LONG < size; idx++) { 110 + val = addr1[idx] & addr2[idx]; 111 + if (val) 112 + return min(idx * BITS_PER_LONG + __ffs(val), size); 113 + } 114 + 115 + return size; 116 + } 117 + #endif 118 + 99 119 #ifndef find_first_zero_bit 100 120 /* 101 121 * Find the first cleared bit in a memory region.