Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Convert 'alloc_flex' family to use the new default GFP_KERNEL argument

This is the exact same thing as the 'alloc_obj()' version, only much
smaller because there are a lot fewer users of the *alloc_flex()
interface.

As with the alloc_obj() version, this was done entirely with mindless brute
force, using the same script, except using 'flex' in the pattern rather
than 'objs*'.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+352 -352
+1 -1
arch/arm/kernel/atags_proc.c
··· 54 54 55 55 WARN_ON(tag->hdr.tag != ATAG_NONE); 56 56 57 - b = kmalloc_flex(*b, data, size, GFP_KERNEL); 57 + b = kmalloc_flex(*b, data, size); 58 58 if (!b) 59 59 goto nomem; 60 60
+1 -1
arch/arm64/kernel/machine_kexec_file.c
··· 52 52 for_each_mem_range(i, &start, &end) 53 53 nr_ranges++; 54 54 55 - cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL); 55 + cmem = kmalloc_flex(*cmem, ranges, nr_ranges); 56 56 if (!cmem) 57 57 return -ENOMEM; 58 58
+1 -1
arch/loongarch/kernel/machine_kexec_file.c
··· 68 68 for_each_mem_range(i, &start, &end) 69 69 nr_ranges++; 70 70 71 - cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL); 71 + cmem = kmalloc_flex(*cmem, ranges, nr_ranges); 72 72 if (!cmem) 73 73 return -ENOMEM; 74 74
+1 -1
arch/powerpc/platforms/ps3/device-init.c
··· 344 344 repo->dev_index, repo->dev_type, port, blk_size, num_blocks, 345 345 num_regions); 346 346 347 - p = kzalloc_flex(*p, regions, num_regions, GFP_KERNEL); 347 + p = kzalloc_flex(*p, regions, num_regions); 348 348 if (!p) { 349 349 result = -ENOMEM; 350 350 goto fail_malloc;
+1 -1
arch/riscv/kernel/machine_kexec_file.c
··· 64 64 nr_ranges = 1; /* For exclusion of crashkernel region */ 65 65 walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); 66 66 67 - cmem = kmalloc_flex(*cmem, ranges, nr_ranges, GFP_KERNEL); 67 + cmem = kmalloc_flex(*cmem, ranges, nr_ranges); 68 68 if (!cmem) 69 69 return -ENOMEM; 70 70
+1 -1
arch/x86/events/intel/uncore.c
··· 1016 1016 } *attr_group; 1017 1017 for (i = 0; type->event_descs[i].attr.attr.name; i++); 1018 1018 1019 - attr_group = kzalloc_flex(*attr_group, attrs, i + 1, GFP_KERNEL); 1019 + attr_group = kzalloc_flex(*attr_group, attrs, i + 1); 1020 1020 if (!attr_group) 1021 1021 goto err; 1022 1022
+1 -1
arch/x86/events/rapl.c
··· 742 742 else if (rapl_pmu_scope != PERF_PMU_SCOPE_PKG) 743 743 return -EINVAL; 744 744 745 - rapl_pmus = kzalloc_flex(*rapl_pmus, rapl_pmu, nr_rapl_pmu, GFP_KERNEL); 745 + rapl_pmus = kzalloc_flex(*rapl_pmus, rapl_pmu, nr_rapl_pmu); 746 746 if (!rapl_pmus) 747 747 return -ENOMEM; 748 748
+1 -1
arch/x86/kernel/cpu/mce/dev-mcelog.c
··· 338 338 int err; 339 339 340 340 mce_log_len = max(MCE_LOG_MIN_LEN, num_online_cpus()); 341 - mcelog = kzalloc_flex(*mcelog, entry, mce_log_len, GFP_KERNEL); 341 + mcelog = kzalloc_flex(*mcelog, entry, mce_log_len); 342 342 if (!mcelog) 343 343 return -ENOMEM; 344 344
+1 -1
crypto/deflate.c
··· 40 40 DEFLATE_DEF_MEMLEVEL)); 41 41 struct deflate_stream *ctx; 42 42 43 - ctx = kvmalloc_flex(*ctx, workspace, size, GFP_KERNEL); 43 + ctx = kvmalloc_flex(*ctx, workspace, size); 44 44 if (!ctx) 45 45 return ERR_PTR(-ENOMEM); 46 46
+1 -1
crypto/zstd.c
··· 44 44 if (!wksp_size) 45 45 return ERR_PTR(-EINVAL); 46 46 47 - ctx = kvmalloc_flex(*ctx, wksp, wksp_size, GFP_KERNEL); 47 + ctx = kvmalloc_flex(*ctx, wksp, wksp_size); 48 48 if (!ctx) 49 49 return ERR_PTR(-ENOMEM); 50 50
+1 -1
drivers/accel/amdxdna/aie2_error.c
··· 350 350 struct async_events *events; 351 351 int i, ret; 352 352 353 - events = kzalloc_flex(*events, event, total_col, GFP_KERNEL); 353 + events = kzalloc_flex(*events, event, total_col); 354 354 if (!events) 355 355 return -ENOMEM; 356 356
+1 -1
drivers/accel/amdxdna/aie2_solver.c
··· 266 266 struct solver_node *node; 267 267 int ret; 268 268 269 - node = kzalloc_flex(*node, start_cols, cdop->cols_len, GFP_KERNEL); 269 + node = kzalloc_flex(*node, start_cols, cdop->cols_len); 270 270 if (!node) 271 271 return ERR_PTR(-ENOMEM); 272 272
+1 -1
drivers/accel/amdxdna/amdxdna_ctx.c
··· 436 436 int ret, idx; 437 437 438 438 XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt); 439 - job = kzalloc_flex(*job, bos, arg_bo_cnt, GFP_KERNEL); 439 + job = kzalloc_flex(*job, bos, arg_bo_cnt); 440 440 if (!job) 441 441 return -ENOMEM; 442 442
+1 -1
drivers/accel/ivpu/ivpu_job.c
··· 525 525 struct ivpu_device *vdev = file_priv->vdev; 526 526 struct ivpu_job *job; 527 527 528 - job = kzalloc_flex(*job, bos, bo_count, GFP_KERNEL); 528 + job = kzalloc_flex(*job, bos, bo_count); 529 529 if (!job) 530 530 return NULL; 531 531
+1 -1
drivers/ata/libata-core.c
··· 2832 2832 if (!nr_cpr) 2833 2833 goto out; 2834 2834 2835 - cpr_log = kzalloc_flex(*cpr_log, cpr, nr_cpr, GFP_KERNEL); 2835 + cpr_log = kzalloc_flex(*cpr_log, cpr, nr_cpr); 2836 2836 if (!cpr_log) 2837 2837 goto out; 2838 2838
+1 -1
drivers/block/ublk_drv.c
··· 4613 4613 goto out_unlock; 4614 4614 4615 4615 ret = -ENOMEM; 4616 - ub = kzalloc_flex(*ub, queues, info.nr_hw_queues, GFP_KERNEL); 4616 + ub = kzalloc_flex(*ub, queues, info.nr_hw_queues); 4617 4617 if (!ub) 4618 4618 goto out_unlock; 4619 4619 mutex_init(&ub->mutex);
+1 -1
drivers/block/zloop.c
··· 997 997 goto out; 998 998 } 999 999 1000 - zlo = kvzalloc_flex(*zlo, zones, nr_zones, GFP_KERNEL); 1000 + zlo = kvzalloc_flex(*zlo, zones, nr_zones); 1001 1001 if (!zlo) { 1002 1002 ret = -ENOMEM; 1003 1003 goto out;
+1 -1
drivers/bus/vexpress-config.c
··· 284 284 val = energy_quirk; 285 285 } 286 286 287 - func = kzalloc_flex(*func, template, num, GFP_KERNEL); 287 + func = kzalloc_flex(*func, template, num); 288 288 if (!func) 289 289 return ERR_PTR(-ENOMEM); 290 290
+1 -1
drivers/char/hpet.c
··· 823 823 return 0; 824 824 } 825 825 826 - hpetp = kzalloc_flex(*hpetp, hp_dev, hdp->hd_nirqs, GFP_KERNEL); 826 + hpetp = kzalloc_flex(*hpetp, hp_dev, hdp->hd_nirqs); 827 827 828 828 if (!hpetp) 829 829 return -ENOMEM;
+1 -1
drivers/char/virtio_console.c
··· 412 412 * Allocate buffer and the sg list. The sg list array is allocated 413 413 * directly after the port_buffer struct. 414 414 */ 415 - buf = kmalloc_flex(*buf, sg, pages, GFP_KERNEL); 415 + buf = kmalloc_flex(*buf, sg, pages); 416 416 if (!buf) 417 417 goto fail; 418 418
+1 -1
drivers/clk/at91/pmc.c
··· 87 87 unsigned int num_clks = ncore + nsystem + nperiph + ngck + npck; 88 88 struct pmc_data *pmc_data; 89 89 90 - pmc_data = kzalloc_flex(*pmc_data, hwtable, num_clks, GFP_KERNEL); 90 + pmc_data = kzalloc_flex(*pmc_data, hwtable, num_clks); 91 91 if (!pmc_data) 92 92 return NULL; 93 93
+1 -1
drivers/clk/at91/sckc.c
··· 502 502 if (IS_ERR(slow_osc)) 503 503 goto unregister_slow_rc; 504 504 505 - clk_data = kzalloc_flex(*clk_data, hws, 2, GFP_KERNEL); 505 + clk_data = kzalloc_flex(*clk_data, hws, 2); 506 506 if (!clk_data) 507 507 goto unregister_slow_osc; 508 508
+1 -1
drivers/clk/bcm/clk-iproc-pll.c
··· 735 735 if (WARN_ON(!pll)) 736 736 return; 737 737 738 - clk_data = kzalloc_flex(*clk_data, hws, num_clks, GFP_KERNEL); 738 + clk_data = kzalloc_flex(*clk_data, hws, num_clks); 739 739 if (WARN_ON(!clk_data)) 740 740 goto err_clk_data; 741 741 clk_data->num = num_clks;
+1 -1
drivers/clk/berlin/bg2.c
··· 499 499 u8 avpll_flags = 0; 500 500 int n, ret; 501 501 502 - clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL); 502 + clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS); 503 503 if (!clk_data) { 504 504 of_node_put(parent_np); 505 505 return;
+1 -1
drivers/clk/berlin/bg2q.c
··· 285 285 struct clk_hw **hws; 286 286 int n, ret; 287 287 288 - clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL); 288 + clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS); 289 289 if (!clk_data) { 290 290 of_node_put(parent_np); 291 291 return;
+1 -1
drivers/clk/clk-asm9260.c
··· 262 262 u32 rate; 263 263 int n; 264 264 265 - clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS, GFP_KERNEL); 265 + clk_data = kzalloc_flex(*clk_data, hws, MAX_CLKS); 266 266 if (!clk_data) 267 267 return; 268 268 clk_data->num = MAX_CLKS;
+2 -2
drivers/clk/clk-eyeq.c
··· 400 400 401 401 clk_count = data->pll_count + data->div_count + 402 402 data->fixed_factor_count + data->early_clk_count; 403 - cells = kzalloc_flex(*cells, hws, clk_count, GFP_KERNEL); 403 + cells = kzalloc_flex(*cells, hws, clk_count); 404 404 if (!cells) 405 405 return -ENOMEM; 406 406 ··· 738 738 739 739 clk_count = early_data->early_pll_count + early_data->early_fixed_factor_count + 740 740 early_data->late_clk_count; 741 - cells = kzalloc_flex(*cells, hws, clk_count, GFP_KERNEL); 741 + cells = kzalloc_flex(*cells, hws, clk_count); 742 742 if (!cells) { 743 743 ret = -ENOMEM; 744 744 goto err;
+1 -1
drivers/clk/clk-stm32h7.c
··· 1200 1200 const char *hse_clk, *lse_clk, *i2s_clk; 1201 1201 struct regmap *pdrm; 1202 1202 1203 - clk_data = kzalloc_flex(*clk_data, hws, STM32H7_MAX_CLKS, GFP_KERNEL); 1203 + clk_data = kzalloc_flex(*clk_data, hws, STM32H7_MAX_CLKS); 1204 1204 if (!clk_data) 1205 1205 return; 1206 1206
+1 -1
drivers/clk/imgtec/clk-boston.c
··· 58 58 cpu_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK1DIV); 59 59 cpu_freq = mult_frac(in_freq, mul, cpu_div); 60 60 61 - onecell = kzalloc_flex(*onecell, hws, BOSTON_CLK_COUNT, GFP_KERNEL); 61 + onecell = kzalloc_flex(*onecell, hws, BOSTON_CLK_COUNT); 62 62 if (!onecell) 63 63 return; 64 64
+1 -1
drivers/clk/imx/clk-imx7d.c
··· 382 382 struct device_node *np; 383 383 void __iomem *base; 384 384 385 - clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX7D_CLK_END, GFP_KERNEL); 385 + clk_hw_data = kzalloc_flex(*clk_hw_data, hws, IMX7D_CLK_END); 386 386 if (WARN_ON(!clk_hw_data)) 387 387 return; 388 388
+1 -1
drivers/clk/ingenic/tcu.c
··· 379 379 } 380 380 } 381 381 382 - tcu->clocks = kzalloc_flex(*tcu->clocks, hws, TCU_CLK_COUNT, GFP_KERNEL); 382 + tcu->clocks = kzalloc_flex(*tcu->clocks, hws, TCU_CLK_COUNT); 383 383 if (!tcu->clocks) { 384 384 ret = -ENOMEM; 385 385 goto err_clk_disable;
+1 -1
drivers/clk/mediatek/clk-mtk.c
··· 67 67 { 68 68 struct clk_hw_onecell_data *clk_data; 69 69 70 - clk_data = kzalloc_flex(*clk_data, hws, clk_num, GFP_KERNEL); 70 + clk_data = kzalloc_flex(*clk_data, hws, clk_num); 71 71 if (!clk_data) 72 72 return NULL; 73 73
+1 -1
drivers/clk/ralink/clk-mt7621.c
··· 372 372 373 373 count = ARRAY_SIZE(mt7621_clks_base) + 374 374 ARRAY_SIZE(mt7621_fixed_clks) + ARRAY_SIZE(mt7621_gates); 375 - clk_data = kzalloc_flex(*clk_data, hws, count, GFP_KERNEL); 375 + clk_data = kzalloc_flex(*clk_data, hws, count); 376 376 if (!clk_data) 377 377 goto free_clk_priv; 378 378
+1 -1
drivers/clk/ralink/clk-mtmips.c
··· 936 936 priv->data = data; 937 937 count = priv->data->num_clk_base + priv->data->num_clk_fixed + 938 938 priv->data->num_clk_factor + priv->data->num_clk_periph; 939 - clk_data = kzalloc_flex(*clk_data, hws, count, GFP_KERNEL); 939 + clk_data = kzalloc_flex(*clk_data, hws, count); 940 940 if (!clk_data) 941 941 goto free_clk_priv; 942 942
+1 -1
drivers/clk/renesas/clk-div6.c
··· 251 251 struct clk *clk; 252 252 unsigned int i; 253 253 254 - clock = kzalloc_flex(*clock, parents, num_parents, GFP_KERNEL); 254 + clock = kzalloc_flex(*clock, parents, num_parents); 255 255 if (!clock) 256 256 return ERR_PTR(-ENOMEM); 257 257
+1 -1
drivers/clk/renesas/clk-mstp.c
··· 184 184 struct clk **clks; 185 185 unsigned int i; 186 186 187 - group = kzalloc_flex(*group, clks, MSTP_MAX_CLOCKS, GFP_KERNEL); 187 + group = kzalloc_flex(*group, clks, MSTP_MAX_CLOCKS); 188 188 if (!group) 189 189 return; 190 190
+1 -1
drivers/clk/renesas/renesas-cpg-mssr.c
··· 1258 1258 } 1259 1259 1260 1260 nclks = info->num_total_core_clks + info->num_hw_mod_clks; 1261 - priv = kzalloc_flex(*priv, clks, nclks, GFP_KERNEL); 1261 + priv = kzalloc_flex(*priv, clks, nclks); 1262 1262 if (!priv) 1263 1263 return -ENOMEM; 1264 1264
+1 -1
drivers/clk/samsung/clk.c
··· 82 82 struct samsung_clk_provider *ctx; 83 83 int i; 84 84 85 - ctx = kzalloc_flex(*ctx, clk_data.hws, nr_clks, GFP_KERNEL); 85 + ctx = kzalloc_flex(*ctx, clk_data.hws, nr_clks); 86 86 if (!ctx) 87 87 panic("could not allocate clock provider context.\n"); 88 88
+1 -1
drivers/clk/visconti/pll.c
··· 330 330 struct visconti_pll_provider *ctx; 331 331 int i; 332 332 333 - ctx = kzalloc_flex(*ctx, clk_data.hws, nr_plls, GFP_KERNEL); 333 + ctx = kzalloc_flex(*ctx, clk_data.hws, nr_plls); 334 334 if (!ctx) 335 335 return ERR_PTR(-ENOMEM); 336 336
+1 -1
drivers/clk/zynqmp/clkc.c
··· 759 759 if (ret) 760 760 return ret; 761 761 762 - zynqmp_data = kzalloc_flex(*zynqmp_data, hws, clock_max_idx, GFP_KERNEL); 762 + zynqmp_data = kzalloc_flex(*zynqmp_data, hws, clock_max_idx); 763 763 if (!zynqmp_data) 764 764 return -ENOMEM; 765 765
+1 -1
drivers/clocksource/ingenic-timer.c
··· 286 286 if (IS_ERR(map)) 287 287 return PTR_ERR(map); 288 288 289 - tcu = kzalloc_flex(*tcu, timers, num_possible_cpus(), GFP_KERNEL); 289 + tcu = kzalloc_flex(*tcu, timers, num_possible_cpus()); 290 290 if (!tcu) 291 291 return -ENOMEM; 292 292
+1 -1
drivers/cpufreq/e_powersaver.c
··· 320 320 states = 2; 321 321 322 322 /* Allocate private data and frequency table for current cpu */ 323 - centaur = kzalloc_flex(*centaur, freq_table, states + 1, GFP_KERNEL); 323 + centaur = kzalloc_flex(*centaur, freq_table, states + 1); 324 324 if (!centaur) 325 325 return -ENOMEM; 326 326 eps_cpu[0] = centaur;
+1 -1
drivers/cxl/core/features.c
··· 94 94 return NULL; 95 95 96 96 struct cxl_feat_entries *entries __free(kvfree) = 97 - kvmalloc_flex(*entries, ent, count, GFP_KERNEL); 97 + kvmalloc_flex(*entries, ent, count); 98 98 if (!entries) 99 99 return NULL; 100 100
+1 -1
drivers/cxl/core/memdev.c
··· 831 831 struct cxl_mbox_cmd mbox_cmd; 832 832 int rc; 833 833 834 - transfer = kzalloc_flex(*transfer, data, 0, GFP_KERNEL); 834 + transfer = kzalloc_flex(*transfer, data, 0); 835 835 if (!transfer) 836 836 return -ENOMEM; 837 837
+2 -2
drivers/cxl/core/port.c
··· 2017 2017 if (!is_cxl_root(port)) 2018 2018 return ERR_PTR(-EINVAL); 2019 2019 2020 - cxlrd = kzalloc_flex(*cxlrd, cxlsd.target, nr_targets, GFP_KERNEL); 2020 + cxlrd = kzalloc_flex(*cxlrd, cxlsd.target, nr_targets); 2021 2021 if (!cxlrd) 2022 2022 return ERR_PTR(-ENOMEM); 2023 2023 ··· 2070 2070 if (is_cxl_root(port) || is_cxl_endpoint(port)) 2071 2071 return ERR_PTR(-EINVAL); 2072 2072 2073 - cxlsd = kzalloc_flex(*cxlsd, target, nr_targets, GFP_KERNEL); 2073 + cxlsd = kzalloc_flex(*cxlsd, target, nr_targets); 2074 2074 if (!cxlsd) 2075 2075 return ERR_PTR(-ENOMEM); 2076 2076
+1 -1
drivers/cxl/core/region.c
··· 3464 3464 return -ENXIO; 3465 3465 3466 3466 struct cxl_pmem_region *cxlr_pmem __free(kfree) = 3467 - kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets, GFP_KERNEL); 3467 + kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets); 3468 3468 if (!cxlr_pmem) 3469 3469 return -ENOMEM; 3470 3470
+1 -1
drivers/cxl/pmem.c
··· 234 234 return -EINVAL; 235 235 236 236 set_lsa = 237 - kvzalloc_flex(*set_lsa, data, cmd->in_length, GFP_KERNEL); 237 + kvzalloc_flex(*set_lsa, data, cmd->in_length); 238 238 if (!set_lsa) 239 239 return -ENOMEM; 240 240
+1 -1
drivers/dax/kmem.c
··· 121 121 init_node_memory_type(numa_node, mtype); 122 122 123 123 rc = -ENOMEM; 124 - data = kzalloc_flex(*data, res, dev_dax->nr_range, GFP_KERNEL); 124 + data = kzalloc_flex(*data, res, dev_dax->nr_range); 125 125 if (!data) 126 126 goto err_dax_kmem_data; 127 127
+1 -1
drivers/dma-buf/dma-fence-array.c
··· 179 179 { 180 180 struct dma_fence_array *array; 181 181 182 - return kzalloc_flex(*array, callbacks, num_fences, GFP_KERNEL); 182 + return kzalloc_flex(*array, callbacks, num_fences); 183 183 } 184 184 EXPORT_SYMBOL(dma_fence_array_alloc); 185 185
+1 -1
drivers/dma/timb_dma.c
··· 635 635 DRIVER_NAME)) 636 636 return -EBUSY; 637 637 638 - td = kzalloc_flex(*td, channels, pdata->nr_channels, GFP_KERNEL); 638 + td = kzalloc_flex(*td, channels, pdata->nr_channels); 639 639 if (!td) { 640 640 err = -ENOMEM; 641 641 goto err_release_region;
+1 -1
drivers/edac/imh_base.c
··· 257 257 struct skx_dev *d; 258 258 259 259 for (i = 0; i < n; i++) { 260 - d = kzalloc_flex(*d, imc, imc_num, GFP_KERNEL); 260 + d = kzalloc_flex(*d, imc, imc_num); 261 261 if (!d) 262 262 return -ENOMEM; 263 263
+1 -1
drivers/edac/skx_common.c
··· 346 346 if (!pdev) 347 347 break; 348 348 ndev++; 349 - d = kzalloc_flex(*d, imc, imc_num, GFP_KERNEL); 349 + d = kzalloc_flex(*d, imc, imc_num); 350 350 if (!d) { 351 351 pci_dev_put(pdev); 352 352 return -ENOMEM;
+1 -1
drivers/extcon/extcon-usbc-cros-ec.c
··· 68 68 struct cros_ec_command *msg; 69 69 int ret; 70 70 71 - msg = kzalloc_flex(*msg, data, max(outsize, insize), GFP_KERNEL); 71 + msg = kzalloc_flex(*msg, data, max(outsize, insize)); 72 72 if (!msg) 73 73 return -ENOMEM; 74 74
+1 -1
drivers/firewire/core-cdev.c
··· 941 941 if (a->length > 256) 942 942 return -EINVAL; 943 943 944 - r = kmalloc_flex(*r, data, a->length, GFP_KERNEL); 944 + r = kmalloc_flex(*r, data, a->length); 945 945 if (r == NULL) 946 946 return -ENOMEM; 947 947
+2 -2
drivers/gpio/gpio-aggregator.c
··· 916 916 if (gpio_aggregator_count_lines(aggr) == 0) 917 917 return -EINVAL; 918 918 919 - aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1, GFP_KERNEL); 919 + aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1); 920 920 if (!aggr->lookups) 921 921 return -ENOMEM; 922 922 ··· 1456 1456 memcpy(aggr->args, buf, count + 1); 1457 1457 1458 1458 aggr->init_via_sysfs = true; 1459 - aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1, GFP_KERNEL); 1459 + aggr->lookups = kzalloc_flex(*aggr->lookups, table, 1); 1460 1460 if (!aggr->lookups) { 1461 1461 res = -ENOMEM; 1462 1462 goto free_ga;
+1 -1
drivers/gpio/gpio-virtuser.c
··· 1388 1388 lockdep_assert_held(&dev->lock); 1389 1389 1390 1390 struct gpiod_lookup_table *table __free(kfree) = 1391 - kzalloc_flex(*table, table, num_entries + 1, GFP_KERNEL); 1391 + kzalloc_flex(*table, table, num_entries + 1); 1392 1392 if (!table) 1393 1393 return -ENOMEM; 1394 1394
+1 -1
drivers/gpio/gpiolib-cdev.c
··· 1610 1610 if (ret) 1611 1611 return ret; 1612 1612 1613 - lr = kvzalloc_flex(*lr, lines, ulr.num_lines, GFP_KERNEL); 1613 + lr = kvzalloc_flex(*lr, lines, ulr.num_lines); 1614 1614 if (!lr) 1615 1615 return -ENOMEM; 1616 1616 lr->num_lines = ulr.num_lines;
+1 -1
drivers/gpio/gpiolib-shared.c
··· 478 478 if (!key) 479 479 return -ENOMEM; 480 480 481 - lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL); 481 + lookup = kzalloc_flex(*lookup, table, 2); 482 482 if (!lookup) 483 483 return -ENOMEM; 484 484
+1 -1
drivers/gpio/gpiolib.c
··· 147 147 struct gpio_desc_label *new = NULL, *old; 148 148 149 149 if (label) { 150 - new = kzalloc_flex(*new, str, strlen(label) + 1, GFP_KERNEL); 150 + new = kzalloc_flex(*new, str, strlen(label) + 1); 151 151 if (!new) 152 152 return -ENOMEM; 153 153
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
··· 76 76 unsigned i; 77 77 int r; 78 78 79 - list = kvzalloc_flex(*list, entries, num_entries, GFP_KERNEL); 79 + list = kvzalloc_flex(*list, entries, num_entries); 80 80 if (!list) 81 81 return -ENOMEM; 82 82
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
··· 212 212 int32_t ctx_prio; 213 213 int r; 214 214 215 - entity = kzalloc_flex(*entity, fences, amdgpu_sched_jobs, GFP_KERNEL); 215 + entity = kzalloc_flex(*entity, fences, amdgpu_sched_jobs); 216 216 if (!entity) 217 217 return -ENOMEM; 218 218
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
··· 122 122 struct ttm_range_mgr_node *node; 123 123 int r; 124 124 125 - node = kzalloc_flex(*node, mm_nodes, 1, GFP_KERNEL); 125 + node = kzalloc_flex(*node, mm_nodes, 1); 126 126 if (!node) 127 127 return -ENOMEM; 128 128
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 198 198 if (num_ibs == 0) 199 199 return -EINVAL; 200 200 201 - *job = kzalloc_flex(**job, ibs, num_ibs, GFP_KERNEL); 201 + *job = kzalloc_flex(**job, ibs, num_ibs); 202 202 if (!*job) 203 203 return -ENOMEM; 204 204
+1 -1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
··· 165 165 PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries), 166 166 "Invalid CAC Leakage PowerPlay Table!", return 1); 167 167 168 - table = kzalloc_flex(*table, entries, max_levels, GFP_KERNEL); 168 + table = kzalloc_flex(*table, entries, max_levels); 169 169 if (!table) 170 170 return -ENOMEM; 171 171
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
··· 133 133 int count = 8; 134 134 struct phm_clock_voltage_dependency_table *table_clk_vlt; 135 135 136 - table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, count, GFP_KERNEL); 136 + table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, count); 137 137 138 138 if (NULL == table_clk_vlt) { 139 139 pr_err("Can not allocate memory!\n"); ··· 472 472 uint32_t i; 473 473 struct smu10_voltage_dependency_table *ptable; 474 474 475 - ptable = kzalloc_flex(*ptable, entries, num_entry, GFP_KERNEL); 475 + ptable = kzalloc_flex(*ptable, entries, num_entry); 476 476 if (NULL == ptable) 477 477 return -ENOMEM; 478 478
+1 -1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
··· 276 276 { 277 277 struct phm_clock_voltage_dependency_table *table_clk_vlt; 278 278 279 - table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 8, GFP_KERNEL); 279 + table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 8); 280 280 281 281 if (NULL == table_clk_vlt) { 282 282 pr_err("Can not allocate memory!\n");
+1 -1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
··· 495 495 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 496 496 497 497 /* initialize vddc_dep_on_dal_pwrl table */ 498 - table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4, GFP_KERNEL); 498 + table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4); 499 499 500 500 if (NULL == table_clk_vlt) { 501 501 pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
··· 755 755 num_entries = clk_dep_table->ucNumEntries; 756 756 757 757 758 - clk_table = kzalloc_flex(*clk_table, entries, num_entries, GFP_KERNEL); 758 + clk_table = kzalloc_flex(*clk_table, entries, num_entries); 759 759 if (!clk_table) 760 760 return -ENOMEM; 761 761 ··· 1040 1040 PP_ASSERT_WITH_CODE((vddc_lookup_pp_tables->ucNumEntries != 0), 1041 1041 "Invalid SOC_VDDD Lookup Table!", return 1); 1042 1042 1043 - table = kzalloc_flex(*table, entries, max_levels, GFP_KERNEL); 1043 + table = kzalloc_flex(*table, entries, max_levels); 1044 1044 if (!table) 1045 1045 return -ENOMEM; 1046 1046
+1 -1
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
··· 231 231 { 232 232 struct gpiod_lookup_table *lookup; 233 233 234 - lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL); 234 + lookup = kzalloc_flex(*lookup, table, 2); 235 235 if (!lookup) 236 236 return; 237 237
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_context.c
··· 1103 1103 { 1104 1104 struct i915_gem_engines *e; 1105 1105 1106 - e = kzalloc_flex(*e, engines, count, GFP_KERNEL); 1106 + e = kzalloc_flex(*e, engines, count); 1107 1107 if (!e) 1108 1108 return NULL; 1109 1109
+1 -1
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
··· 103 103 struct dma_buf *dmabuf; 104 104 int i; 105 105 106 - mock = kmalloc_flex(*mock, pages, npages, GFP_KERNEL); 106 + mock = kmalloc_flex(*mock, pages, npages); 107 107 if (!mock) 108 108 return ERR_PTR(-ENOMEM); 109 109
+1 -1
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
··· 3934 3934 unsigned int n; 3935 3935 int err; 3936 3936 3937 - ve = kzalloc_flex(*ve, siblings, count, GFP_KERNEL); 3937 + ve = kzalloc_flex(*ve, siblings, count); 3938 3938 if (!ve) 3939 3939 return ERR_PTR(-ENOMEM); 3940 3940
+1 -1
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 1183 1183 VFIO_REGION_INFO_FLAG_WRITE; 1184 1184 info->size = gvt_aperture_sz(vgpu->gvt); 1185 1185 1186 - sparse = kzalloc_flex(*sparse, areas, nr_areas, GFP_KERNEL); 1186 + sparse = kzalloc_flex(*sparse, areas, nr_areas); 1187 1187 if (!sparse) 1188 1188 return -ENOMEM; 1189 1189
+2 -2
drivers/gpu/drm/i915/i915_syncmap.c
··· 197 197 { 198 198 struct i915_syncmap *p; 199 199 200 - p = kmalloc_flex(*p, seqno, KSYNCMAP, GFP_KERNEL); 200 + p = kmalloc_flex(*p, seqno, KSYNCMAP); 201 201 if (unlikely(!p)) 202 202 return NULL; 203 203 ··· 279 279 unsigned int above; 280 280 281 281 /* Insert a join above the current layer */ 282 - next = kzalloc_flex(*next, child, KSYNCMAP, GFP_KERNEL); 282 + next = kzalloc_flex(*next, child, KSYNCMAP); 283 283 if (unlikely(!next)) 284 284 return -ENOMEM; 285 285
+1 -1
drivers/gpu/drm/i915/selftests/i915_request.c
··· 2841 2841 if (!stats) 2842 2842 return -ENOMEM; 2843 2843 2844 - ps = kzalloc_flex(*ps, ce, nengines, GFP_KERNEL); 2844 + ps = kzalloc_flex(*ps, ce, nengines); 2845 2845 if (!ps) { 2846 2846 kfree(stats); 2847 2847 return -ENOMEM;
+2 -2
drivers/gpu/drm/nouveau/nouveau_svm.c
··· 900 900 { 901 901 struct nouveau_pfnmap_args *args; 902 902 903 - args = kzalloc_flex(*args, p.phys, npages, GFP_KERNEL); 903 + args = kzalloc_flex(*args, p.phys, npages); 904 904 if (!args) 905 905 return NULL; 906 906 ··· 1063 1063 if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL) 1064 1064 return; 1065 1065 1066 - drm->svm = svm = kzalloc_flex(*drm->svm, buffer, 1, GFP_KERNEL); 1066 + drm->svm = svm = kzalloc_flex(*drm->svm, buffer, 1); 1067 1067 if (!drm->svm) 1068 1068 return; 1069 1069
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.c
··· 89 89 struct nvkm_chid *chid; 90 90 int id; 91 91 92 - if (!(chid = *pchid = kzalloc_flex(*chid, used, nr, GFP_KERNEL))) 92 + if (!(chid = *pchid = kzalloc_flex(*chid, used, nr))) 93 93 return -ENOMEM; 94 94 95 95 kref_init(&chid->kref);
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
··· 106 106 { 107 107 struct nvkm_engine_func *func; 108 108 109 - func = kzalloc_flex(*func, sclass, nclass + 1, GFP_KERNEL); 109 + func = kzalloc_flex(*func, sclass, nclass + 1); 110 110 if (!func) 111 111 return -ENOMEM; 112 112
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
··· 56 56 struct nvkm_gr_func *func; 57 57 struct r535_gr *gr; 58 58 59 - func = kzalloc_flex(*func, sclass, ARRAY_SIZE(classes) + 1, GFP_KERNEL); 59 + func = kzalloc_flex(*func, sclass, ARRAY_SIZE(classes) + 1); 60 60 if (!func) 61 61 return -ENOMEM; 62 62
+1 -1
drivers/gpu/drm/ttm/ttm_range_manager.c
··· 73 73 if (!lpfn) 74 74 lpfn = man->size; 75 75 76 - node = kzalloc_flex(*node, mm_nodes, 1, GFP_KERNEL); 76 + node = kzalloc_flex(*node, mm_nodes, 1); 77 77 if (!node) 78 78 return -ENOMEM; 79 79
+1 -1
drivers/gpu/drm/v3d/v3d_perfmon.c
··· 353 353 return -EINVAL; 354 354 } 355 355 356 - perfmon = kzalloc_flex(*perfmon, values, req->ncounters, GFP_KERNEL); 356 + perfmon = kzalloc_flex(*perfmon, values, req->ncounters); 357 357 if (!perfmon) 358 358 return -ENOMEM; 359 359
+1 -1
drivers/gpu/drm/vc4/vc4_perfmon.c
··· 172 172 return -EINVAL; 173 173 } 174 174 175 - perfmon = kzalloc_flex(*perfmon, counters, req->ncounters, GFP_KERNEL); 175 + perfmon = kzalloc_flex(*perfmon, counters, req->ncounters); 176 176 if (!perfmon) 177 177 return -ENOMEM; 178 178 perfmon->dev = vc4;
+1 -1
drivers/gpu/drm/virtio/virtgpu_gem.c
··· 167 167 { 168 168 struct virtio_gpu_object_array *objs; 169 169 170 - objs = kmalloc_flex(*objs, objs, nents, GFP_KERNEL); 170 + objs = kmalloc_flex(*objs, objs, nents); 171 171 if (!objs) 172 172 return NULL; 173 173
+1 -1
drivers/gpu/drm/xe/xe_exec_queue.c
··· 208 208 /* only kernel queues can be permanent */ 209 209 XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL)); 210 210 211 - q = kzalloc_flex(*q, lrc, width, GFP_KERNEL); 211 + q = kzalloc_flex(*q, lrc, width); 212 212 if (!q) 213 213 return ERR_PTR(-ENOMEM); 214 214
+1 -1
drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
··· 710 710 if (ret < 0) 711 711 return ret; 712 712 713 - cbd = kzalloc_flex(*cbd, blob, ret, GFP_KERNEL); 713 + cbd = kzalloc_flex(*cbd, blob, ret); 714 714 if (!cbd) 715 715 return -ENOMEM; 716 716
+1 -1
drivers/gpu/drm/xe/xe_lmtt.c
··· 64 64 struct xe_bo *bo; 65 65 int err; 66 66 67 - pt = kzalloc_flex(*pt, entries, num_entries, GFP_KERNEL); 67 + pt = kzalloc_flex(*pt, entries, num_entries); 68 68 if (!pt) { 69 69 err = -ENOMEM; 70 70 goto out;
+1 -1
drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
··· 33 33 struct xe_ttm_sys_node *node; 34 34 int r; 35 35 36 - node = kzalloc_flex(*node, base.mm_nodes, 1, GFP_KERNEL); 36 + node = kzalloc_flex(*node, base.mm_nodes, 1); 37 37 if (!node) 38 38 return -ENOMEM; 39 39
+1 -1
drivers/greybus/module.c
··· 93 93 struct gb_module *module; 94 94 int i; 95 95 96 - module = kzalloc_flex(*module, interfaces, num_interfaces, GFP_KERNEL); 96 + module = kzalloc_flex(*module, interfaces, num_interfaces); 97 97 if (!module) 98 98 return NULL; 99 99
+1 -1
drivers/hte/hte.c
··· 850 850 return -EINVAL; 851 851 } 852 852 853 - gdev = kzalloc_flex(*gdev, ei, chip->nlines, GFP_KERNEL); 853 + gdev = kzalloc_flex(*gdev, ei, chip->nlines); 854 854 if (!gdev) 855 855 return -ENOMEM; 856 856
+1 -1
drivers/i2c/i2c-atr.c
··· 724 724 if (!ops || !ops->attach_addr || !ops->detach_addr) 725 725 return ERR_PTR(-EINVAL); 726 726 727 - atr = kzalloc_flex(*atr, adapter, max_adapters, GFP_KERNEL); 727 + atr = kzalloc_flex(*atr, adapter, max_adapters); 728 728 if (!atr) 729 729 return ERR_PTR(-ENOMEM); 730 730
+1 -1
drivers/i3c/master/adi-i3c-master.c
··· 187 187 { 188 188 struct adi_i3c_xfer *xfer; 189 189 190 - xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL); 190 + xfer = kzalloc_flex(*xfer, cmds, ncmds); 191 191 if (!xfer) 192 192 return NULL; 193 193
+1 -1
drivers/i3c/master/dw-i3c-master.c
··· 382 382 { 383 383 struct dw_i3c_xfer *xfer; 384 384 385 - xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL); 385 + xfer = kzalloc_flex(*xfer, cmds, ncmds); 386 386 if (!xfer) 387 387 return NULL; 388 388
+1 -1
drivers/i3c/master/i3c-master-cdns.c
··· 498 498 { 499 499 struct cdns_i3c_xfer *xfer; 500 500 501 - xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL); 501 + xfer = kzalloc_flex(*xfer, cmds, ncmds); 502 502 if (!xfer) 503 503 return NULL; 504 504
+1 -1
drivers/i3c/master/mipi-i3c-hci/dma.c
··· 328 328 } 329 329 if (nr_rings > XFER_RINGS) 330 330 nr_rings = XFER_RINGS; 331 - rings = kzalloc_flex(*rings, headers, nr_rings, GFP_KERNEL); 331 + rings = kzalloc_flex(*rings, headers, nr_rings); 332 332 if (!rings) 333 333 return -ENOMEM; 334 334 hci->io_data = rings;
+1 -1
drivers/i3c/master/renesas-i3c.c
··· 345 345 { 346 346 struct renesas_i3c_xfer *xfer; 347 347 348 - xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL); 348 + xfer = kzalloc_flex(*xfer, cmds, ncmds); 349 349 if (!xfer) 350 350 return NULL; 351 351
+1 -1
drivers/i3c/master/svc-i3c-master.c
··· 1504 1504 { 1505 1505 struct svc_i3c_xfer *xfer; 1506 1506 1507 - xfer = kzalloc_flex(*xfer, cmds, ncmds, GFP_KERNEL); 1507 + xfer = kzalloc_flex(*xfer, cmds, ncmds); 1508 1508 if (!xfer) 1509 1509 return NULL; 1510 1510
+1 -1
drivers/iio/buffer/industrialio-hw-consumer.c
··· 60 60 return buf; 61 61 } 62 62 63 - buf = kzalloc_flex(*buf, scan_mask, mask_longs, GFP_KERNEL); 63 + buf = kzalloc_flex(*buf, scan_mask, mask_longs); 64 64 if (!buf) 65 65 return NULL; 66 66
+1 -1
drivers/infiniband/core/cm.c
··· 4050 4050 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); 4051 4051 atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]); 4052 4052 4053 - work = kmalloc_flex(*work, path, paths, GFP_KERNEL); 4053 + work = kmalloc_flex(*work, path, paths); 4054 4054 if (!work) { 4055 4055 ib_free_recv_mad(mad_recv_wc); 4056 4056 return;
+1 -1
drivers/infiniband/core/multicast.c
··· 823 823 int i; 824 824 int count = 0; 825 825 826 - dev = kmalloc_flex(*dev, port, device->phys_port_cnt, GFP_KERNEL); 826 + dev = kmalloc_flex(*dev, port, device->phys_port_cnt); 827 827 if (!dev) 828 828 return -ENOMEM; 829 829
+1 -1
drivers/infiniband/core/verbs.c
··· 3205 3205 { 3206 3206 struct rdma_hw_stats *stats; 3207 3207 3208 - stats = kzalloc_flex(*stats, value, num_counters, GFP_KERNEL); 3208 + stats = kzalloc_flex(*stats, value, num_counters); 3209 3209 if (!stats) 3210 3210 return NULL; 3211 3211
+1 -1
drivers/infiniband/hw/hfi1/user_exp_rcv.c
··· 735 735 * Allocate the node first so we can handle a potential 736 736 * failure before we've programmed anything. 737 737 */ 738 - node = kzalloc_flex(*node, pages, npages, GFP_KERNEL); 738 + node = kzalloc_flex(*node, pages, npages); 739 739 if (!node) 740 740 return -ENOMEM; 741 741
+1 -1
drivers/infiniband/hw/mlx5/odp.c
··· 2097 2097 return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list, 2098 2098 num_sge); 2099 2099 2100 - work = kvzalloc_flex(*work, frags, num_sge, GFP_KERNEL); 2100 + work = kvzalloc_flex(*work, frags, num_sge); 2101 2101 if (!work) 2102 2102 return -ENOMEM; 2103 2103
+2 -2
drivers/infiniband/hw/mthca/mthca_memfree.c
··· 367 367 obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size; 368 368 num_icm = DIV_ROUND_UP(nobj, obj_per_chunk); 369 369 370 - table = kmalloc_flex(*table, icm, num_icm, GFP_KERNEL); 370 + table = kmalloc_flex(*table, icm, num_icm); 371 371 if (!table) 372 372 return NULL; 373 373 ··· 532 532 return NULL; 533 533 534 534 npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; 535 - db_tab = kmalloc_flex(*db_tab, page, npages, GFP_KERNEL); 535 + db_tab = kmalloc_flex(*db_tab, page, npages); 536 536 if (!db_tab) 537 537 return ERR_PTR(-ENOMEM); 538 538
+1 -1
drivers/infiniband/sw/rdmavt/mr.c
··· 242 242 243 243 /* Allocate struct plus pointers to first level page tables. */ 244 244 m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; 245 - mr = kzalloc_flex(*mr, mr.map, m, GFP_KERNEL); 245 + mr = kzalloc_flex(*mr, mr.map, m); 246 246 if (!mr) 247 247 goto bail; 248 248
+1 -1
drivers/infiniband/sw/rxe/rxe_odp.c
··· 523 523 num_sge); 524 524 525 525 /* Asynchronous call is "best-effort" and allowed to fail */ 526 - work = kvzalloc_flex(*work, frags, num_sge, GFP_KERNEL); 526 + work = kvzalloc_flex(*work, frags, num_sge); 527 527 if (!work) 528 528 return -ENOMEM; 529 529
+1 -1
drivers/infiniband/sw/siw/siw_mem.c
··· 321 321 if (num_buf == 0) 322 322 return ERR_PTR(-EINVAL); 323 323 324 - pbl = kzalloc_flex(*pbl, pbe, num_buf, GFP_KERNEL); 324 + pbl = kzalloc_flex(*pbl, pbe, num_buf); 325 325 if (!pbl) 326 326 return ERR_PTR(-ENOMEM); 327 327
+1 -1
drivers/infiniband/ulp/srp/ib_srp.c
··· 418 418 if (pool_size <= 0) 419 419 goto err; 420 420 ret = -ENOMEM; 421 - pool = kzalloc_flex(*pool, desc, pool_size, GFP_KERNEL); 421 + pool = kzalloc_flex(*pool, desc, pool_size); 422 422 if (!pool) 423 423 goto err; 424 424 pool->size = pool_size;
+1 -1
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 3210 3210 3211 3211 pr_debug("device = %p\n", device); 3212 3212 3213 - sdev = kzalloc_flex(*sdev, port, device->phys_port_cnt, GFP_KERNEL); 3213 + sdev = kzalloc_flex(*sdev, port, device->phys_port_cnt); 3214 3214 if (!sdev) 3215 3215 return -ENOMEM; 3216 3216
+1 -1
drivers/input/evdev.c
··· 465 465 struct evdev_client *client; 466 466 int error; 467 467 468 - client = kvzalloc_flex(*client, buffer, bufsize, GFP_KERNEL); 468 + client = kvzalloc_flex(*client, buffer, bufsize); 469 469 if (!client) 470 470 return -ENOMEM; 471 471
+1 -1
drivers/input/ff-core.c
··· 303 303 } 304 304 305 305 struct ff_device *ff __free(kfree) = 306 - kzalloc_flex(*ff, effect_owners, max_effects, GFP_KERNEL); 306 + kzalloc_flex(*ff, effect_owners, max_effects); 307 307 if (!ff) 308 308 return -ENOMEM; 309 309
+1 -1
drivers/input/input-leds.c
··· 101 101 if (!num_leds) 102 102 return -ENXIO; 103 103 104 - leds = kzalloc_flex(*leds, leds, num_leds, GFP_KERNEL); 104 + leds = kzalloc_flex(*leds, leds, num_leds); 105 105 if (!leds) 106 106 return -ENOMEM; 107 107
+1 -1
drivers/input/input-mt.c
··· 50 50 return -EINVAL; 51 51 52 52 struct input_mt *mt __free(kfree) = 53 - kzalloc_flex(*mt, slots, num_slots, GFP_KERNEL); 53 + kzalloc_flex(*mt, slots, num_slots); 54 54 if (!mt) 55 55 return -ENOMEM; 56 56
+1 -1
drivers/input/keyboard/omap-keypad.c
··· 193 193 row_shift = get_count_order(pdata->cols); 194 194 keycodemax = pdata->rows << row_shift; 195 195 196 - omap_kp = kzalloc_flex(*omap_kp, keymap, keycodemax, GFP_KERNEL); 196 + omap_kp = kzalloc_flex(*omap_kp, keymap, keycodemax); 197 197 input_dev = input_allocate_device(); 198 198 if (!omap_kp || !input_dev) { 199 199 kfree(omap_kp);
+1 -1
drivers/interconnect/core.c
··· 172 172 struct icc_path *path; 173 173 int i; 174 174 175 - path = kzalloc_flex(*path, reqs, num_nodes, GFP_KERNEL); 175 + path = kzalloc_flex(*path, reqs, num_nodes); 176 176 if (!path) 177 177 return ERR_PTR(-ENOMEM); 178 178
+1 -1
drivers/iommu/iommu.c
··· 3010 3010 return -ENOMEM; 3011 3011 3012 3012 /* Preallocate for the overwhelmingly common case of 1 ID */ 3013 - fwspec = kzalloc_flex(*fwspec, ids, 1, GFP_KERNEL); 3013 + fwspec = kzalloc_flex(*fwspec, ids, 1); 3014 3014 if (!fwspec) 3015 3015 return -ENOMEM; 3016 3016
+1 -1
drivers/md/dm-bio-prison-v1.c
··· 44 44 struct dm_bio_prison *prison; 45 45 46 46 num_locks = dm_num_hash_locks(); 47 - prison = kzalloc_flex(*prison, regions, num_locks, GFP_KERNEL); 47 + prison = kzalloc_flex(*prison, regions, num_locks); 48 48 if (!prison) 49 49 return NULL; 50 50 prison->num_locks = num_locks;
+1 -1
drivers/md/dm-crypt.c
··· 3237 3237 return -EINVAL; 3238 3238 } 3239 3239 3240 - cc = kzalloc_flex(*cc, key, key_size, GFP_KERNEL); 3240 + cc = kzalloc_flex(*cc, key, key_size); 3241 3241 if (!cc) { 3242 3242 ti->error = "Cannot allocate encryption context"; 3243 3243 return -ENOMEM;
+1 -1
drivers/md/dm-raid.c
··· 744 744 return ERR_PTR(-EINVAL); 745 745 } 746 746 747 - rs = kzalloc_flex(*rs, dev, raid_devs, GFP_KERNEL); 747 + rs = kzalloc_flex(*rs, dev, raid_devs); 748 748 if (!rs) { 749 749 ti->error = "Cannot allocate raid context"; 750 750 return ERR_PTR(-ENOMEM);
+1 -1
drivers/md/dm-raid1.c
··· 890 890 struct dm_dirty_log *dl) 891 891 { 892 892 struct mirror_set *ms = 893 - kzalloc_flex(*ms, mirror, nr_mirrors, GFP_KERNEL); 893 + kzalloc_flex(*ms, mirror, nr_mirrors); 894 894 895 895 if (!ms) { 896 896 ti->error = "Cannot allocate mirror context";
+1 -1
drivers/md/dm-stripe.c
··· 129 129 return -EINVAL; 130 130 } 131 131 132 - sc = kmalloc_flex(*sc, stripe, stripes, GFP_KERNEL); 132 + sc = kmalloc_flex(*sc, stripe, stripes); 133 133 if (!sc) { 134 134 ti->error = "Memory allocation for striped context failed"; 135 135 return -ENOMEM;
+1 -1
drivers/md/dm-switch.c
··· 62 62 { 63 63 struct switch_ctx *sctx; 64 64 65 - sctx = kzalloc_flex(*sctx, path_list, nr_paths, GFP_KERNEL); 65 + sctx = kzalloc_flex(*sctx, path_list, nr_paths); 66 66 if (!sctx) 67 67 return NULL; 68 68
+1 -1
drivers/md/md-linear.c
··· 92 92 int cnt; 93 93 int i; 94 94 95 - conf = kzalloc_flex(*conf, disks, raid_disks, GFP_KERNEL); 95 + conf = kzalloc_flex(*conf, disks, raid_disks); 96 96 if (!conf) 97 97 return ERR_PTR(-ENOMEM); 98 98
+1 -1
drivers/media/platform/nvidia/tegra-vde/v4l2.c
··· 811 811 struct tegra_ctx *ctx; 812 812 int err; 813 813 814 - ctx = kzalloc_flex(*ctx, ctrls, ARRAY_SIZE(ctrl_cfgs), GFP_KERNEL); 814 + ctx = kzalloc_flex(*ctx, ctrls, ARRAY_SIZE(ctrl_cfgs)); 815 815 if (!ctx) 816 816 return -ENOMEM; 817 817
+1 -1
drivers/media/v4l2-core/v4l2-event.c
··· 235 235 if (elems < 1) 236 236 elems = 1; 237 237 238 - sev = kvzalloc_flex(*sev, events, elems, GFP_KERNEL); 238 + sev = kvzalloc_flex(*sev, events, elems); 239 239 if (!sev) 240 240 return -ENOMEM; 241 241 sev->elems = elems;
+1 -1
drivers/memstick/host/jmb38x_ms.c
··· 926 926 goto err_out_int; 927 927 } 928 928 929 - jm = kzalloc_flex(*jm, hosts, cnt, GFP_KERNEL); 929 + jm = kzalloc_flex(*jm, hosts, cnt); 930 930 if (!jm) { 931 931 rc = -ENOMEM; 932 932 goto err_out_int;
+1 -1
drivers/misc/bcm-vk/bcm_vk_msg.c
··· 700 700 return -EINVAL; 701 701 } 702 702 703 - entry = kzalloc_flex(*entry, to_v_msg, 1, GFP_KERNEL); 703 + entry = kzalloc_flex(*entry, to_v_msg, 1); 704 704 if (!entry) 705 705 return -ENOMEM; 706 706 entry->to_v_blks = 1; /* always 1 block */
+1 -1
drivers/misc/enclosure.c
··· 117 117 struct enclosure_component_callbacks *cb) 118 118 { 119 119 struct enclosure_device *edev = 120 - kzalloc_flex(*edev, component, components, GFP_KERNEL); 120 + kzalloc_flex(*edev, component, components); 121 121 int err, i; 122 122 123 123 BUG_ON(!cb);
+1 -1
drivers/misc/lkdtm/bugs.c
··· 477 477 { 478 478 struct lkdtm_cb_fam *inst; 479 479 480 - inst = kzalloc_flex(*inst, array, element_count + 1, GFP_KERNEL); 480 + inst = kzalloc_flex(*inst, array, element_count + 1); 481 481 if (!inst) { 482 482 pr_err("FAIL: could not allocate test struct!\n"); 483 483 return;
+1 -1
drivers/misc/ntsync.c
··· 884 884 if (args->alert) 885 885 fds[count] = args->alert; 886 886 887 - q = kmalloc_flex(*q, entries, total_count, GFP_KERNEL); 887 + q = kmalloc_flex(*q, entries, total_count); 888 888 if (!q) 889 889 return -ENOMEM; 890 890 q->task = current;
+1 -1
drivers/misc/tifm_core.c
··· 176 176 { 177 177 struct tifm_adapter *fm; 178 178 179 - fm = kzalloc_flex(*fm, sockets, num_sockets, GFP_KERNEL); 179 + fm = kzalloc_flex(*fm, sockets, num_sockets); 180 180 if (fm) { 181 181 fm->dev.class = &tifm_adapter_class; 182 182 fm->dev.parent = dev;
+1 -1
drivers/mtd/chips/cfi_cmdset_0001.c
··· 776 776 } 777 777 778 778 numvirtchips = cfi->numchips * numparts; 779 - newcfi = kmalloc_flex(*newcfi, chips, numvirtchips, GFP_KERNEL); 779 + newcfi = kmalloc_flex(*newcfi, chips, numvirtchips); 780 780 if (!newcfi) 781 781 return -ENOMEM; 782 782 shared = kmalloc_objs(struct flchip_shared, cfi->numchips,
+1 -1
drivers/mtd/chips/gen_probe.c
··· 134 134 * our caller, and copy the appropriate data into them. 135 135 */ 136 136 137 - retcfi = kmalloc_flex(*retcfi, chips, cfi.numchips, GFP_KERNEL); 137 + retcfi = kmalloc_flex(*retcfi, chips, cfi.numchips); 138 138 139 139 if (!retcfi) { 140 140 kfree(cfi.cfiq);
+1 -1
drivers/mtd/devices/mtd_intel_dg.c
··· 764 764 return -ENODEV; 765 765 } 766 766 767 - nvm = kzalloc_flex(*nvm, regions, nregions, GFP_KERNEL); 767 + nvm = kzalloc_flex(*nvm, regions, nregions); 768 768 if (!nvm) 769 769 return -ENOMEM; 770 770
+1 -1
drivers/mtd/lpddr/qinfo_probe.c
··· 167 167 lpddr.numchips = 1; 168 168 169 169 numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum; 170 - retlpddr = kzalloc_flex(*retlpddr, chips, numvirtchips, GFP_KERNEL); 170 + retlpddr = kzalloc_flex(*retlpddr, chips, numvirtchips); 171 171 if (!retlpddr) 172 172 return NULL; 173 173
+1 -1
drivers/mtd/maps/sa1100-flash.c
··· 170 170 /* 171 171 * Allocate the map_info structs in one go. 172 172 */ 173 - info = kzalloc_flex(*info, subdev, nr, GFP_KERNEL); 173 + info = kzalloc_flex(*info, subdev, nr); 174 174 if (!info) { 175 175 ret = -ENOMEM; 176 176 goto out;
+1 -1
drivers/net/bonding/bond_main.c
··· 5099 5099 5100 5100 usable_slaves = kzalloc_flex(*usable_slaves, arr, bond->slave_cnt, 5101 5101 GFP_KERNEL); 5102 - all_slaves = kzalloc_flex(*all_slaves, arr, bond->slave_cnt, GFP_KERNEL); 5102 + all_slaves = kzalloc_flex(*all_slaves, arr, bond->slave_cnt); 5103 5103 if (!usable_slaves || !all_slaves) { 5104 5104 ret = -ENOMEM; 5105 5105 goto out;
+1 -1
drivers/net/can/usb/gs_usb.c
··· 1560 1560 return -EINVAL; 1561 1561 } 1562 1562 1563 - parent = kzalloc_flex(*parent, canch, icount, GFP_KERNEL); 1563 + parent = kzalloc_flex(*parent, canch, icount); 1564 1564 if (!parent) 1565 1565 return -ENOMEM; 1566 1566
+1 -1
drivers/net/dsa/sja1105/sja1105_tas.c
··· 477 477 if (list_empty(&gating_cfg->entries)) 478 478 return false; 479 479 480 - dummy = kzalloc_flex(*dummy, entries, num_entries, GFP_KERNEL); 480 + dummy = kzalloc_flex(*dummy, entries, num_entries); 481 481 if (!dummy) { 482 482 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory"); 483 483 return true;
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
··· 1654 1654 int i, j; 1655 1655 struct bnx2x_vf_mac_vlan_filters *fl = NULL; 1656 1656 1657 - fl = kzalloc_flex(*fl, filters, tlv->n_mac_vlan_filters, GFP_KERNEL); 1657 + fl = kzalloc_flex(*fl, filters, tlv->n_mac_vlan_filters); 1658 1658 if (!fl) 1659 1659 return -ENOMEM; 1660 1660
+1 -1
drivers/net/ethernet/chelsio/cxgb3/l2t.c
··· 408 408 struct l2t_data *d; 409 409 int i; 410 410 411 - d = kvzalloc_flex(*d, l2tab, l2t_capacity, GFP_KERNEL); 411 + d = kvzalloc_flex(*d, l2tab, l2t_capacity); 412 412 if (!d) 413 413 return NULL; 414 414
+1 -1
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
··· 287 287 if (clipt_size < CLIPT_MIN_HASH_BUCKETS) 288 288 return NULL; 289 289 290 - ctbl = kvzalloc_flex(*ctbl, hash_list, clipt_size, GFP_KERNEL); 290 + ctbl = kvzalloc_flex(*ctbl, hash_list, clipt_size); 291 291 if (!ctbl) 292 292 return NULL; 293 293
+1 -1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
··· 501 501 if (!max_tids) 502 502 return NULL; 503 503 504 - t = kvzalloc_flex(*t, table, max_tids, GFP_KERNEL); 504 + t = kvzalloc_flex(*t, table, max_tids); 505 505 if (!t) 506 506 return NULL; 507 507
+1 -1
drivers/net/ethernet/chelsio/cxgb4/l2t.c
··· 620 620 if (l2t_size < L2T_MIN_HASH_BUCKETS) 621 621 return NULL; 622 622 623 - d = kvzalloc_flex(*d, l2tab, l2t_size, GFP_KERNEL); 623 + d = kvzalloc_flex(*d, l2tab, l2t_size); 624 624 if (!d) 625 625 return NULL; 626 626
+1 -1
drivers/net/ethernet/chelsio/cxgb4/sched.c
··· 653 653 struct sched_table *s; 654 654 unsigned int i; 655 655 656 - s = kvzalloc_flex(*s, tab, sched_size, GFP_KERNEL); 656 + s = kvzalloc_flex(*s, tab, sched_size); 657 657 if (!s) 658 658 return NULL; 659 659
+1 -1
drivers/net/ethernet/chelsio/cxgb4/smt.c
··· 47 47 48 48 smt_size = SMT_SIZE; 49 49 50 - s = kvzalloc_flex(*s, smtab, smt_size, GFP_KERNEL); 50 + s = kvzalloc_flex(*s, smtab, smt_size); 51 51 if (!s) 52 52 return NULL; 53 53 s->smt_size = smt_size;
+3 -3
drivers/net/ethernet/engleder/tsnep_selftests.c
··· 354 354 struct tc_taprio_qopt_offload *qopt; 355 355 int i; 356 356 357 - qopt = kzalloc_flex(*qopt, entries, 255, GFP_KERNEL); 357 + qopt = kzalloc_flex(*qopt, entries, 255); 358 358 if (!qopt) 359 359 return false; 360 360 for (i = 0; i < 255; i++) ··· 451 451 struct tc_taprio_qopt_offload *qopt; 452 452 int i; 453 453 454 - qopt = kzalloc_flex(*qopt, entries, 255, GFP_KERNEL); 454 + qopt = kzalloc_flex(*qopt, entries, 255); 455 455 if (!qopt) 456 456 return false; 457 457 for (i = 0; i < 255; i++) ··· 604 604 struct tc_taprio_qopt_offload *qopt; 605 605 int i; 606 606 607 - qopt = kzalloc_flex(*qopt, entries, 255, GFP_KERNEL); 607 + qopt = kzalloc_flex(*qopt, entries, 255); 608 608 if (!qopt) 609 609 return false; 610 610 for (i = 0; i < 255; i++)
+1 -1
drivers/net/ethernet/freescale/enetc/enetc.c
··· 3454 3454 struct enetc_bdr *bdr; 3455 3455 int j, err; 3456 3456 3457 - v = kzalloc_flex(*v, tx_ring, v_tx_rings, GFP_KERNEL); 3457 + v = kzalloc_flex(*v, tx_ring, v_tx_rings); 3458 3458 if (!v) 3459 3459 return -ENOMEM; 3460 3460
+1 -1
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
··· 81 81 vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id); 82 82 qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id); 83 83 84 - vf_cb = kzalloc_flex(*vf_cb, ae_handle.qs, qnum_per_vf, GFP_KERNEL); 84 + vf_cb = kzalloc_flex(*vf_cb, ae_handle.qs, qnum_per_vf); 85 85 if (unlikely(!vf_cb)) { 86 86 dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n"); 87 87 ae_handle = ERR_PTR(-ENOMEM);
+1 -1
drivers/net/ethernet/intel/fm10k/fm10k_main.c
··· 1597 1597 ring_count = txr_count + rxr_count; 1598 1598 1599 1599 /* allocate q_vector and rings */ 1600 - q_vector = kzalloc_flex(*q_vector, ring, ring_count, GFP_KERNEL); 1600 + q_vector = kzalloc_flex(*q_vector, ring, ring_count); 1601 1601 if (!q_vector) 1602 1602 return -ENOMEM; 1603 1603
+1 -1
drivers/net/ethernet/intel/iavf/iavf_ptp.c
··· 133 133 { 134 134 struct iavf_ptp_aq_cmd *cmd; 135 135 136 - cmd = kzalloc_flex(*cmd, msg, msglen, GFP_KERNEL); 136 + cmd = kzalloc_flex(*cmd, msg, msglen); 137 137 if (!cmd) 138 138 return NULL; 139 139
+2 -2
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
··· 200 200 if (q_index >= num_rxq) 201 201 return -EINVAL; 202 202 203 - rule = kzalloc_flex(*rule, rule_info, 1, GFP_KERNEL); 203 + rule = kzalloc_flex(*rule, rule_info, 1); 204 204 if (!rule) 205 205 return -ENOMEM; 206 206 ··· 310 310 if (!idpf_sideband_action_ena(vport, fsp)) 311 311 return -EOPNOTSUPP; 312 312 313 - rule = kzalloc_flex(*rule, rule_info, 1, GFP_KERNEL); 313 + rule = kzalloc_flex(*rule, rule_info, 1); 314 314 if (!rule) 315 315 return -ENOMEM; 316 316
+1 -1
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
··· 740 740 { 741 741 struct idpf_queue_set *qp; 742 742 743 - qp = kzalloc_flex(*qp, qs, num, GFP_KERNEL); 743 + qp = kzalloc_flex(*qp, qs, num); 744 744 if (!qp) 745 745 return NULL; 746 746
+1 -1
drivers/net/ethernet/intel/igc/igc_main.c
··· 4863 4863 /* allocate q_vector and rings */ 4864 4864 q_vector = adapter->q_vector[v_idx]; 4865 4865 if (!q_vector) 4866 - q_vector = kzalloc_flex(*q_vector, ring, ring_count, GFP_KERNEL); 4866 + q_vector = kzalloc_flex(*q_vector, ring, ring_count); 4867 4867 else 4868 4868 memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 4869 4869 if (!q_vector)
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
··· 858 858 q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count), 859 859 GFP_KERNEL, node); 860 860 if (!q_vector) 861 - q_vector = kzalloc_flex(*q_vector, ring, ring_count, GFP_KERNEL); 861 + q_vector = kzalloc_flex(*q_vector, ring, ring_count); 862 862 if (!q_vector) 863 863 return -ENOMEM; 864 864
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
··· 509 509 char resn[32]; 510 510 int i; 511 511 512 - d = kzalloc_flex(*d, fields, nfile, GFP_KERNEL); 512 + d = kzalloc_flex(*d, fields, nfile); 513 513 if (!d) 514 514 return -ENOMEM; 515 515
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1548 1548 { 1549 1549 struct mlx5_flow_handle *handle; 1550 1550 1551 - handle = kzalloc_flex(*handle, rule, num_rules, GFP_KERNEL); 1551 + handle = kzalloc_flex(*handle, rule, num_rules); 1552 1552 if (!handle) 1553 1553 return NULL; 1554 1554
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
··· 460 460 alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); 461 461 bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; 462 462 463 - fc_bulk = kvzalloc_flex(*fc_bulk, fcs, bulk_len, GFP_KERNEL); 463 + fc_bulk = kvzalloc_flex(*fc_bulk, fcs, bulk_len); 464 464 if (!fc_bulk) 465 465 return NULL; 466 466
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
··· 117 117 return NULL; 118 118 pr_pool_ctx = pool_ctx; 119 119 bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN; 120 - pr_bulk = kvzalloc_flex(*pr_bulk, prs_data, bulk_len, GFP_KERNEL); 120 + pr_bulk = kvzalloc_flex(*pr_bulk, prs_data, bulk_len); 121 121 if (!pr_bulk) 122 122 return NULL; 123 123 ··· 273 273 274 274 pattern = pool_ctx; 275 275 bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN; 276 - mh_bulk = kvzalloc_flex(*mh_bulk, mhs_data, bulk_len, GFP_KERNEL); 276 + mh_bulk = kvzalloc_flex(*mh_bulk, mhs_data, bulk_len); 277 277 if (!mh_bulk) 278 278 return NULL; 279 279
+1 -1
drivers/net/ethernet/mellanox/mlxsw/core_env.c
··· 1453 1453 mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) : 1454 1454 module_count; 1455 1455 1456 - env = kzalloc_flex(*env, line_cards, num_of_slots + 1, GFP_KERNEL); 1456 + env = kzalloc_flex(*env, line_cards, num_of_slots + 1); 1457 1457 if (!env) 1458 1458 return -ENOMEM; 1459 1459
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
··· 224 224 } 225 225 226 226 nr_entries = div_u64(resource_size, info->alloc_size); 227 - part = kzalloc_flex(*part, usage, BITS_TO_LONGS(nr_entries), GFP_KERNEL); 227 + part = kzalloc_flex(*part, usage, BITS_TO_LONGS(nr_entries)); 228 228 if (!part) 229 229 return ERR_PTR(-ENOMEM); 230 230
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
··· 124 124 struct mlxsw_sp_counter_pool *pool; 125 125 int err; 126 126 127 - pool = kzalloc_flex(*pool, sub_pools, sub_pools_count, GFP_KERNEL); 127 + pool = kzalloc_flex(*pool, sub_pools, sub_pools_count); 128 128 if (!pool) 129 129 return -ENOMEM; 130 130 mlxsw_sp->counter_pool = pool;
+3 -3
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 5198 5198 return -EINVAL; 5199 5199 } 5200 5200 5201 - nhgi = kzalloc_flex(*nhgi, nexthops, nhs, GFP_KERNEL); 5201 + nhgi = kzalloc_flex(*nhgi, nexthops, nhs); 5202 5202 if (!nhgi) 5203 5203 return -ENOMEM; 5204 5204 nh_grp->nhgi = nhgi; ··· 5779 5779 struct mlxsw_sp_nexthop *nh; 5780 5780 int err, i; 5781 5781 5782 - nhgi = kzalloc_flex(*nhgi, nexthops, nhs, GFP_KERNEL); 5782 + nhgi = kzalloc_flex(*nhgi, nexthops, nhs); 5783 5783 if (!nhgi) 5784 5784 return -ENOMEM; 5785 5785 nh_grp->nhgi = nhgi; ··· 7032 7032 struct mlxsw_sp_nexthop *nh; 7033 7033 int err, i; 7034 7034 7035 - nhgi = kzalloc_flex(*nhgi, nexthops, fib6_entry->nrt6, GFP_KERNEL); 7035 + nhgi = kzalloc_flex(*nhgi, nexthops, fib6_entry->nrt6); 7036 7036 if (!nhgi) 7037 7037 return -ENOMEM; 7038 7038 nh_grp->nhgi = nhgi;
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
··· 87 87 return -EIO; 88 88 89 89 entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN); 90 - span = kzalloc_flex(*span, entries, entries_count, GFP_KERNEL); 90 + span = kzalloc_flex(*span, entries, entries_count); 91 91 if (!span) 92 92 return -ENOMEM; 93 93 refcount_set(&span->policer_id_base_ref_count, 0);
+1 -1
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
··· 1640 1640 return -EIO; 1641 1641 1642 1642 /* Allocate NAPI vector and queue triads */ 1643 - nv = kzalloc_flex(*nv, qt, qt_count, GFP_KERNEL); 1643 + nv = kzalloc_flex(*nv, qt, qt_count); 1644 1644 if (!nv) 1645 1645 return -ENOMEM; 1646 1646
+1 -1
drivers/net/ethernet/microsoft/mana/hw_channel.c
··· 461 461 int err; 462 462 u16 i; 463 463 464 - dma_buf = kzalloc_flex(*dma_buf, reqs, q_depth, GFP_KERNEL); 464 + dma_buf = kzalloc_flex(*dma_buf, reqs, q_depth); 465 465 if (!dma_buf) 466 466 return -ENOMEM; 467 467
+1 -1
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 2637 2637 2638 2638 gc = gd->gdma_context; 2639 2639 2640 - rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size, GFP_KERNEL); 2640 + rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size); 2641 2641 if (!rxq) 2642 2642 return NULL; 2643 2643
+1 -1
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
··· 500 500 { 501 501 struct nfp_reprs *reprs; 502 502 503 - reprs = kzalloc_flex(*reprs, reprs, num_reprs, GFP_KERNEL); 503 + reprs = kzalloc_flex(*reprs, reprs, num_reprs); 504 504 if (!reprs) 505 505 return NULL; 506 506 reprs->num_reprs = num_reprs;
+1 -1
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
··· 319 319 goto err; 320 320 } 321 321 322 - table = kzalloc_flex(*table, ports, cnt, GFP_KERNEL); 322 + table = kzalloc_flex(*table, ports, cnt); 323 323 if (!table) 324 324 goto err; 325 325
+3 -3
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
··· 1095 1095 if (!priv->dma_cap.frpsel) 1096 1096 return -EOPNOTSUPP; 1097 1097 1098 - sel = kzalloc_flex(*sel, keys, nk, GFP_KERNEL); 1098 + sel = kzalloc_flex(*sel, keys, nk); 1099 1099 if (!sel) 1100 1100 return -ENOMEM; 1101 1101 ··· 1368 1368 cls->command = FLOW_CLS_REPLACE; 1369 1369 cls->cookie = dummy_cookie; 1370 1370 1371 - rule = kzalloc_flex(*rule, action.entries, 1, GFP_KERNEL); 1371 + rule = kzalloc_flex(*rule, action.entries, 1); 1372 1372 if (!rule) { 1373 1373 ret = -ENOMEM; 1374 1374 goto cleanup_cls; ··· 1496 1496 cls->command = FLOW_CLS_REPLACE; 1497 1497 cls->cookie = dummy_cookie; 1498 1498 1499 - rule = kzalloc_flex(*rule, action.entries, 1, GFP_KERNEL); 1499 + rule = kzalloc_flex(*rule, action.entries, 1); 1500 1500 if (!rule) { 1501 1501 ret = -ENOMEM; 1502 1502 goto cleanup_cls;
+1 -1
drivers/net/ethernet/wangxun/libwx/wx_lib.c
··· 2095 2095 /* note this will allocate space for the ring structure as well! */ 2096 2096 ring_count = txr_count + rxr_count; 2097 2097 2098 - q_vector = kzalloc_flex(*q_vector, ring, ring_count, GFP_KERNEL); 2098 + q_vector = kzalloc_flex(*q_vector, ring, ring_count); 2099 2099 if (!q_vector) 2100 2100 return -ENOMEM; 2101 2101
+1 -1
drivers/net/wan/wanxl.c
··· 598 598 ports = 4; 599 599 } 600 600 601 - card = kzalloc_flex(*card, ports, ports, GFP_KERNEL); 601 + card = kzalloc_flex(*card, ports, ports); 602 602 if (!card) { 603 603 pci_release_regions(pdev); 604 604 pci_disable_device(pdev);
+1 -1
drivers/net/wireless/ath/ath11k/ce.c
··· 615 615 struct ath11k_ce_ring *ce_ring; 616 616 dma_addr_t base_addr; 617 617 618 - ce_ring = kzalloc_flex(*ce_ring, skb, nentries, GFP_KERNEL); 618 + ce_ring = kzalloc_flex(*ce_ring, skb, nentries); 619 619 if (ce_ring == NULL) 620 620 return ERR_PTR(-ENOMEM); 621 621
+1 -1
drivers/net/wireless/ath/ath11k/reg.c
··· 146 146 if (WARN_ON(!num_channels)) 147 147 return -EINVAL; 148 148 149 - params = kzalloc_flex(*params, ch_param, num_channels, GFP_KERNEL); 149 + params = kzalloc_flex(*params, ch_param, num_channels); 150 150 if (!params) 151 151 return -ENOMEM; 152 152
+1 -1
drivers/net/wireless/ath/ath12k/ce.c
··· 332 332 struct ath12k_ce_ring *ce_ring; 333 333 dma_addr_t base_addr; 334 334 335 - ce_ring = kzalloc_flex(*ce_ring, skb, nentries, GFP_KERNEL); 335 + ce_ring = kzalloc_flex(*ce_ring, skb, nentries); 336 336 if (!ce_ring) 337 337 return ERR_PTR(-ENOMEM); 338 338
+1 -1
drivers/net/wireless/ath/ath12k/reg.c
··· 170 170 return -EINVAL; 171 171 } 172 172 173 - arg = kzalloc_flex(*arg, channel, num_channels, GFP_KERNEL); 173 + arg = kzalloc_flex(*arg, channel, num_channels); 174 174 175 175 if (!arg) 176 176 return -ENOMEM;
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
··· 23 23 { 24 24 struct brcmf_fweh_info *fweh; 25 25 26 - fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_BCA_E_LAST, GFP_KERNEL); 26 + fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_BCA_E_LAST); 27 27 if (!fweh) 28 28 return -ENOMEM; 29 29
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
··· 132 132 133 133 brcmf_dbg(TRACE, "Enter\n"); 134 134 135 - chunk_buf = kzalloc_flex(*chunk_buf, data, MAX_CHUNK_LEN, GFP_KERNEL); 135 + chunk_buf = kzalloc_flex(*chunk_buf, data, MAX_CHUNK_LEN); 136 136 if (!chunk_buf) { 137 137 err = -ENOMEM; 138 138 return -ENOMEM;
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
··· 66 66 { 67 67 struct brcmf_fweh_info *fweh; 68 68 69 - fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_CYW_E_LAST, GFP_KERNEL); 69 + fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_CYW_E_LAST); 70 70 if (!fweh) 71 71 return -ENOMEM; 72 72
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
··· 825 825 return NULL; 826 826 } 827 827 828 - fwreq = kzalloc_flex(*fwreq, items, n_fwnames, GFP_KERNEL); 828 + fwreq = kzalloc_flex(*fwreq, items, n_fwnames); 829 829 if (!fwreq) 830 830 return NULL; 831 831
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
··· 24 24 { 25 25 struct brcmf_fweh_info *fweh; 26 26 27 - fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_WCC_E_LAST, GFP_KERNEL); 27 + fweh = kzalloc_flex(*fweh, evt_handler, BRCMF_WCC_E_LAST); 28 28 if (!fweh) 29 29 return -ENOMEM; 30 30
+1 -1
drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
··· 1067 1067 if (WARN_ON(!cfg || !cfg->eeprom_params)) 1068 1068 return NULL; 1069 1069 1070 - data = kzalloc_flex(*data, channels, IWL_NUM_CHANNELS, GFP_KERNEL); 1070 + data = kzalloc_flex(*data, channels, IWL_NUM_CHANNELS); 1071 1071 if (!data) 1072 1072 return NULL; 1073 1073
+1 -1
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
··· 71 71 u32 len = le32_to_cpu(tlv->length); 72 72 struct iwl_dbg_tlv_node *node; 73 73 74 - node = kzalloc_flex(*node, tlv.data, len, GFP_KERNEL); 74 + node = kzalloc_flex(*node, tlv.data, len); 75 75 if (!node) 76 76 return NULL; 77 77
+2 -2
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
··· 1687 1687 num_of_ch); 1688 1688 1689 1689 /* build a regdomain rule for every valid channel */ 1690 - regd = kzalloc_flex(*regd, reg_rules, num_of_ch, GFP_KERNEL); 1690 + regd = kzalloc_flex(*regd, reg_rules, num_of_ch); 1691 1691 if (!regd) 1692 1692 return ERR_PTR(-ENOMEM); 1693 1693 ··· 2036 2036 if (empty_otp) 2037 2037 IWL_INFO(trans, "OTP is empty\n"); 2038 2038 2039 - nvm = kzalloc_flex(*nvm, channels, IWL_NUM_CHANNELS, GFP_KERNEL); 2039 + nvm = kzalloc_flex(*nvm, channels, IWL_NUM_CHANNELS); 2040 2040 if (!nvm) { 2041 2041 ret = -ENOMEM; 2042 2042 goto out;
+1 -1
drivers/net/wireless/intel/iwlwifi/mld/d3.c
··· 1172 1172 for (int k = 0; k < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; k++) 1173 1173 n_channels += 1174 1174 hweight8(matches[i].matching_channels[k]); 1175 - match = kzalloc_flex(*match, channels, n_channels, GFP_KERNEL); 1175 + match = kzalloc_flex(*match, channels, n_channels); 1176 1176 if (!match) 1177 1177 return; 1178 1178
+1 -1
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
··· 2092 2092 return -EINVAL; 2093 2093 2094 2094 WARN_ON(rcu_access_pointer(mld_sta->ptk_pn[keyidx])); 2095 - *ptk_pn = kzalloc_flex(**ptk_pn, q, num_rx_queues, GFP_KERNEL); 2095 + *ptk_pn = kzalloc_flex(**ptk_pn, q, num_rx_queues); 2096 2096 if (!*ptk_pn) 2097 2097 return -ENOMEM; 2098 2098
+2 -2
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 2605 2605 n_matches = 0; 2606 2606 } 2607 2607 2608 - net_detect = kzalloc_flex(*net_detect, matches, n_matches, GFP_KERNEL); 2608 + net_detect = kzalloc_flex(*net_detect, matches, n_matches); 2609 2609 if (!net_detect || !n_matches) 2610 2610 goto out_report_nd; 2611 2611 net_detect->n_matches = n_matches; ··· 2619 2619 d3_data->nd_results, 2620 2620 i); 2621 2621 2622 - match = kzalloc_flex(*match, channels, n_channels, GFP_KERNEL); 2622 + match = kzalloc_flex(*match, channels, n_channels); 2623 2623 if (!match) 2624 2624 goto out_report_nd; 2625 2625 match->n_channels = n_channels;
+1 -1
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
··· 1052 1052 if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES)) 1053 1053 return ERR_PTR(-EINVAL); 1054 1054 1055 - regd = kzalloc_flex(*regd, reg_rules, num_chan, GFP_KERNEL); 1055 + regd = kzalloc_flex(*regd, reg_rules, num_chan); 1056 1056 if (!regd) 1057 1057 return ERR_PTR(-ENOMEM); 1058 1058
+1 -1
drivers/net/wireless/mediatek/mt76/agg-rx.c
··· 248 248 249 249 mt76_rx_aggr_stop(dev, wcid, tidno); 250 250 251 - tid = kzalloc_flex(*tid, reorder_buf, size, GFP_KERNEL); 251 + tid = kzalloc_flex(*tid, reorder_buf, size); 252 252 if (!tid) 253 253 return -ENOMEM; 254 254
+1 -1
drivers/net/wireless/realtek/rtw89/acpi.c
··· 115 115 goto out; 116 116 } 117 117 118 - data = kzalloc_flex(*data, buf, len, GFP_KERNEL); 118 + data = kzalloc_flex(*data, buf, len); 119 119 if (!data) 120 120 goto out; 121 121
+1 -1
drivers/nvme/target/admin-cmd.c
··· 542 542 u16 status; 543 543 544 544 status = NVME_SC_INTERNAL; 545 - desc = kmalloc_flex(*desc, nsids, NVMET_MAX_NAMESPACES, GFP_KERNEL); 545 + desc = kmalloc_flex(*desc, nsids, NVMET_MAX_NAMESPACES); 546 546 if (!desc) 547 547 goto out; 548 548
+1 -1
drivers/nvme/target/fc.c
··· 789 789 if (qid > NVMET_NR_QUEUES) 790 790 return NULL; 791 791 792 - queue = kzalloc_flex(*queue, fod, sqsize, GFP_KERNEL); 792 + queue = kzalloc_flex(*queue, fod, sqsize); 793 793 if (!queue) 794 794 return NULL; 795 795
+1 -1
drivers/pci/npem.c
··· 524 524 int led_idx = 0; 525 525 int ret; 526 526 527 - npem = kzalloc_flex(*npem, leds, supported_cnt, GFP_KERNEL); 527 + npem = kzalloc_flex(*npem, leds, supported_cnt); 528 528 if (!npem) 529 529 return -ENOMEM; 530 530
+1 -1
drivers/platform/chrome/wilco_ec/event.c
··· 106 106 { 107 107 struct ec_event_queue *q; 108 108 109 - q = kzalloc_flex(*q, entries, capacity, GFP_KERNEL); 109 + q = kzalloc_flex(*q, entries, capacity); 110 110 if (!q) 111 111 return NULL; 112 112
+2 -2
drivers/platform/x86/amd/pmc/mp1_stb.c
··· 141 141 u32 fsize; 142 142 143 143 fsize = dev->dram_size - S2D_RSVD_RAM_SPACE; 144 - stb_data_arr = kmalloc_flex(*stb_data_arr, data, fsize, GFP_KERNEL); 144 + stb_data_arr = kmalloc_flex(*stb_data_arr, data, fsize); 145 145 if (!stb_data_arr) 146 146 return -ENOMEM; 147 147 ··· 189 189 } 190 190 191 191 fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX); 192 - stb_data_arr = kmalloc_flex(*stb_data_arr, data, fsize, GFP_KERNEL); 192 + stb_data_arr = kmalloc_flex(*stb_data_arr, data, fsize); 193 193 if (!stb_data_arr) 194 194 return -ENOMEM; 195 195
+1 -1
drivers/platform/x86/intel/int3472/discrete.c
··· 107 107 int ret; 108 108 109 109 struct gpiod_lookup_table *lookup __free(kfree) = 110 - kzalloc_flex(*lookup, table, 2, GFP_KERNEL); 110 + kzalloc_flex(*lookup, table, 2); 111 111 if (!lookup) 112 112 return ERR_PTR(-ENOMEM); 113 113
+1 -1
drivers/platform/x86/x86-android-tablets/core.c
··· 49 49 struct gpiod_lookup_table *lookup; 50 50 struct gpio_desc *gpiod; 51 51 52 - lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL); 52 + lookup = kzalloc_flex(*lookup, table, 2); 53 53 if (!lookup) 54 54 return -ENOMEM; 55 55
+1 -1
drivers/power/supply/cros_peripheral_charger.c
··· 64 64 struct cros_ec_command *msg; 65 65 int ret; 66 66 67 - msg = kzalloc_flex(*msg, data, max(outsize, insize), GFP_KERNEL); 67 + msg = kzalloc_flex(*msg, data, max(outsize, insize)); 68 68 if (!msg) 69 69 return -ENOMEM; 70 70
+1 -1
drivers/power/supply/cros_usbpd-charger.c
··· 94 94 struct cros_ec_command *msg; 95 95 int ret; 96 96 97 - msg = kzalloc_flex(*msg, data, max(outsize, insize), GFP_KERNEL); 97 + msg = kzalloc_flex(*msg, data, max(outsize, insize)); 98 98 if (!msg) 99 99 return -ENOMEM; 100 100
+1 -1
drivers/pwm/core.c
··· 2138 2138 if (!chip->operational) 2139 2139 return -ENXIO; 2140 2140 2141 - cdata = kzalloc_flex(*cdata, pwm, chip->npwm, GFP_KERNEL); 2141 + cdata = kzalloc_flex(*cdata, pwm, chip->npwm); 2142 2142 if (!cdata) 2143 2143 return -ENOMEM; 2144 2144
+1 -1
drivers/reset/core.c
··· 1360 1360 if (num < 0) 1361 1361 return optional ? NULL : ERR_PTR(num); 1362 1362 1363 - resets = kzalloc_flex(*resets, rstc, num, GFP_KERNEL); 1363 + resets = kzalloc_flex(*resets, rstc, num); 1364 1364 if (!resets) 1365 1365 return ERR_PTR(-ENOMEM); 1366 1366 resets->num_rstcs = num;
+1 -1
drivers/rtc/rtc-sun6i.c
··· 238 238 return; 239 239 240 240 rtc->data = data; 241 - clk_data = kzalloc_flex(*clk_data, hws, 3, GFP_KERNEL); 241 + clk_data = kzalloc_flex(*clk_data, hws, 3); 242 242 if (!clk_data) { 243 243 kfree(rtc); 244 244 return;
+1 -1
drivers/s390/cio/ccwgroup.c
··· 322 322 if (num_devices < 1) 323 323 return -EINVAL; 324 324 325 - gdev = kzalloc_flex(*gdev, cdev, num_devices, GFP_KERNEL); 325 + gdev = kzalloc_flex(*gdev, cdev, num_devices); 326 326 if (!gdev) 327 327 return -ENOMEM; 328 328
+1 -1
drivers/spi/spi-axi-spi-engine.c
··· 815 815 p_dry.length = 0; 816 816 spi_engine_compile_message(msg, true, &p_dry); 817 817 818 - p = kzalloc_flex(*p, instructions, p_dry.length + 1, GFP_KERNEL); 818 + p = kzalloc_flex(*p, instructions, p_dry.length + 1); 819 819 if (!p) 820 820 return -ENOMEM; 821 821
+1 -1
drivers/spi/spi-bcm2835.c
··· 1313 1313 * More on the problem that it addresses: 1314 1314 * https://www.spinics.net/lists/linux-gpio/msg36218.html 1315 1315 */ 1316 - lookup = kzalloc_flex(*lookup, table, 2, GFP_KERNEL); 1316 + lookup = kzalloc_flex(*lookup, table, 2); 1317 1317 if (!lookup) { 1318 1318 ret = -ENOMEM; 1319 1319 goto err_cleanup;
+1 -1
drivers/staging/greybus/raw.c
··· 73 73 goto exit; 74 74 } 75 75 76 - raw_data = kmalloc_flex(*raw_data, data, len, GFP_KERNEL); 76 + raw_data = kmalloc_flex(*raw_data, data, len); 77 77 if (!raw_data) { 78 78 retval = -ENOMEM; 79 79 goto exit;
+1 -1
drivers/staging/rtl8723bs/os_dep/osdep_service.c
··· 193 193 { 194 194 struct rtw_cbuf *cbuf; 195 195 196 - cbuf = kzalloc_flex(*cbuf, bufs, size, GFP_KERNEL); 196 + cbuf = kzalloc_flex(*cbuf, bufs, size); 197 197 cbuf->size = size; 198 198 199 199 return cbuf;
+1 -1
drivers/target/target_core_file.c
··· 276 276 ssize_t len = 0; 277 277 int ret = 0, i; 278 278 279 - aio_cmd = kmalloc_flex(*aio_cmd, bvecs, sgl_nents, GFP_KERNEL); 279 + aio_cmd = kmalloc_flex(*aio_cmd, bvecs, sgl_nents); 280 280 if (!aio_cmd) 281 281 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 282 282
+1 -1
drivers/thermal/thermal_core.c
··· 1542 1542 if (!thermal_class) 1543 1543 return ERR_PTR(-ENODEV); 1544 1544 1545 - tz = kzalloc_flex(*tz, trips, num_trips, GFP_KERNEL); 1545 + tz = kzalloc_flex(*tz, trips, num_trips); 1546 1546 if (!tz) 1547 1547 return ERR_PTR(-ENOMEM); 1548 1548
+1 -1
drivers/thermal/thermal_debugfs.c
··· 559 559 struct tz_episode *tze; 560 560 int i; 561 561 562 - tze = kzalloc_flex(*tze, trip_stats, tz->num_trips, GFP_KERNEL); 562 + tze = kzalloc_flex(*tze, trip_stats, tz->num_trips); 563 563 if (!tze) 564 564 return NULL; 565 565
+1 -1
drivers/tty/hvc/hvc_console.c
··· 922 922 return ERR_PTR(err); 923 923 } 924 924 925 - hp = kzalloc_flex(*hp, outbuf, outbuf_size, GFP_KERNEL); 925 + hp = kzalloc_flex(*hp, outbuf, outbuf_size); 926 926 if (!hp) 927 927 return ERR_PTR(-ENOMEM); 928 928
+1 -1
drivers/tty/n_hdlc.c
··· 669 669 unsigned int i; 670 670 671 671 for (i = 0; i < count; i++) { 672 - buf = kmalloc_flex(*buf, buf, maxframe, GFP_KERNEL); 672 + buf = kmalloc_flex(*buf, buf, maxframe); 673 673 if (!buf) { 674 674 pr_debug("%s(), kmalloc() failed for %s buffer %u\n", 675 675 __func__, name, i);
+1 -1
drivers/tty/serial/8250/8250_pci.c
··· 4148 4148 nr_ports = rc; 4149 4149 } 4150 4150 4151 - priv = kzalloc_flex(*priv, line, nr_ports, GFP_KERNEL); 4151 + priv = kzalloc_flex(*priv, line, nr_ports); 4152 4152 if (!priv) { 4153 4153 priv = ERR_PTR(-ENOMEM); 4154 4154 goto err_deinit;
+1 -1
drivers/usb/core/config.c
··· 823 823 nalts[i] = j = USB_MAXALTSETTING; 824 824 } 825 825 826 - intfc = kzalloc_flex(*intfc, altsetting, j, GFP_KERNEL); 826 + intfc = kzalloc_flex(*intfc, altsetting, j); 827 827 config->intf_cache[i] = intfc; 828 828 if (!intfc) 829 829 return -ENOMEM;
+1 -1
drivers/usb/gadget/function/f_fs.c
··· 957 957 data_len, ret); 958 958 959 959 data_len -= ret; 960 - buf = kmalloc_flex(*buf, storage, data_len, GFP_KERNEL); 960 + buf = kmalloc_flex(*buf, storage, data_len); 961 961 if (!buf) 962 962 return -ENOMEM; 963 963 buf->length = data_len;
+1 -1
drivers/usb/gadget/function/f_midi.c
··· 1361 1361 } 1362 1362 1363 1363 /* allocate and initialize one new instance */ 1364 - midi = kzalloc_flex(*midi, in_ports_array, opts->in_ports, GFP_KERNEL); 1364 + midi = kzalloc_flex(*midi, in_ports_array, opts->in_ports); 1365 1365 if (!midi) { 1366 1366 status = -ENOMEM; 1367 1367 goto setup_fail;
+1 -1
drivers/usb/gadget/function/f_phonet.c
··· 669 669 struct f_phonet *fp; 670 670 struct f_phonet_opts *opts; 671 671 672 - fp = kzalloc_flex(*fp, out_reqv, phonet_rxq_size, GFP_KERNEL); 672 + fp = kzalloc_flex(*fp, out_reqv, phonet_rxq_size); 673 673 if (!fp) 674 674 return ERR_PTR(-ENOMEM); 675 675
+1 -1
drivers/usb/host/xhci-mtk-sch.c
··· 264 264 else 265 265 len = 1; 266 266 267 - sch_ep = kzalloc_flex(*sch_ep, bw_budget_table, len, GFP_KERNEL); 267 + sch_ep = kzalloc_flex(*sch_ep, bw_budget_table, len); 268 268 if (!sch_ep) 269 269 return ERR_PTR(-ENOMEM); 270 270
+1 -1
drivers/vhost/vhost.c
··· 1979 1979 return -EOPNOTSUPP; 1980 1980 if (mem.nregions > max_mem_regions) 1981 1981 return -E2BIG; 1982 - newmem = kvzalloc_flex(*newmem, regions, mem.nregions, GFP_KERNEL); 1982 + newmem = kvzalloc_flex(*newmem, regions, mem.nregions); 1983 1983 if (!newmem) 1984 1984 return -ENOMEM; 1985 1985
+1 -1
drivers/video/fbdev/mmp/core.c
··· 155 155 struct mmp_path *path = NULL; 156 156 struct mmp_panel *panel; 157 157 158 - path = kzalloc_flex(*path, overlays, info->overlay_num, GFP_KERNEL); 158 + path = kzalloc_flex(*path, overlays, info->overlay_num); 159 159 if (!path) 160 160 return NULL; 161 161
+1 -1
drivers/virt/coco/guest/tsm-mr.c
··· 176 176 const struct bin_attribute **attrs __free(kfree) = 177 177 kzalloc(sizeof(*attrs) * (tm->nr_mrs + 1) + nlen, GFP_KERNEL); 178 178 struct tm_context *ctx __free(kfree) = 179 - kzalloc_flex(*ctx, mrs, tm->nr_mrs, GFP_KERNEL); 179 + kzalloc_flex(*ctx, mrs, tm->nr_mrs); 180 180 char *name, *end; 181 181 182 182 if (!ctx || !attrs)
+1 -1
drivers/xen/privcmd-buf.c
··· 141 141 if (!(vma->vm_flags & VM_SHARED)) 142 142 return -EINVAL; 143 143 144 - vma_priv = kzalloc_flex(*vma_priv, pages, count, GFP_KERNEL); 144 + vma_priv = kzalloc_flex(*vma_priv, pages, count); 145 145 if (!vma_priv) 146 146 return -ENOMEM; 147 147
+1 -1
drivers/xen/xenbus/xenbus_dev_frontend.c
··· 195 195 if (len > XENSTORE_PAYLOAD_MAX) 196 196 return -EINVAL; 197 197 198 - rb = kmalloc_flex(*rb, msg, len, GFP_KERNEL); 198 + rb = kmalloc_flex(*rb, msg, len); 199 199 if (rb == NULL) 200 200 return -ENOMEM; 201 201
+1 -1
drivers/zorro/zorro.c
··· 135 135 int error; 136 136 137 137 /* Initialize the Zorro bus */ 138 - bus = kzalloc_flex(*bus, devices, zorro_num_autocon, GFP_KERNEL); 138 + bus = kzalloc_flex(*bus, devices, zorro_num_autocon); 139 139 if (!bus) 140 140 return -ENOMEM; 141 141
+1 -1
fs/afs/addr_list.c
··· 66 66 if (nr > AFS_MAX_ADDRESSES) 67 67 nr = AFS_MAX_ADDRESSES; 68 68 69 - alist = kzalloc_flex(*alist, addrs, nr, GFP_KERNEL); 69 + alist = kzalloc_flex(*alist, addrs, nr); 70 70 if (!alist) 71 71 return NULL; 72 72
+1 -1
fs/afs/addr_prefs.c
··· 401 401 max_prefs = min_t(size_t, (psize - sizeof(*old)) / sizeof(old->prefs[0]), 255); 402 402 403 403 ret = -ENOMEM; 404 - preflist = kmalloc_flex(*preflist, prefs, max_prefs, GFP_KERNEL); 404 + preflist = kmalloc_flex(*preflist, prefs, max_prefs); 405 405 if (!preflist) 406 406 goto done; 407 407
+1 -1
fs/afs/fsclient.c
··· 2010 2010 size = call->count2 = ntohl(call->tmp); 2011 2011 size = round_up(size, 4); 2012 2012 2013 - acl = kmalloc_flex(*acl, data, size, GFP_KERNEL); 2013 + acl = kmalloc_flex(*acl, data, size); 2014 2014 if (!acl) 2015 2015 return -ENOMEM; 2016 2016 op->acl = acl;
+1 -1
fs/afs/server_list.c
··· 51 51 newrep++; 52 52 } 53 53 54 - slist = kzalloc_flex(*slist, servers, nr_servers, GFP_KERNEL); 54 + slist = kzalloc_flex(*slist, servers, nr_servers); 55 55 if (!slist) 56 56 goto error; 57 57
+2 -2
fs/afs/vl_list.c
··· 15 15 struct afs_vlserver *vlserver; 16 16 static atomic_t debug_ids; 17 17 18 - vlserver = kzalloc_flex(*vlserver, name, name_len + 1, GFP_KERNEL); 18 + vlserver = kzalloc_flex(*vlserver, name, name_len + 1); 19 19 if (vlserver) { 20 20 refcount_set(&vlserver->ref, 1); 21 21 rwlock_init(&vlserver->lock); ··· 51 51 { 52 52 struct afs_vlserver_list *vllist; 53 53 54 - vllist = kzalloc_flex(*vllist, servers, nr_servers, GFP_KERNEL); 54 + vllist = kzalloc_flex(*vllist, servers, nr_servers); 55 55 if (vllist) { 56 56 refcount_set(&vllist->ref, 1); 57 57 rwlock_init(&vllist->lock);
+1 -1
fs/afs/xattr.c
··· 75 75 { 76 76 struct afs_acl *acl; 77 77 78 - acl = kmalloc_flex(*acl, data, size, GFP_KERNEL); 78 + acl = kmalloc_flex(*acl, data, size); 79 79 if (!acl) { 80 80 afs_op_nomem(op); 81 81 return false;
+2 -2
fs/afs/yfsclient.c
··· 2048 2048 size = round_up(size, 4); 2049 2049 2050 2050 if (yacl->flags & YFS_ACL_WANT_ACL) { 2051 - acl = kmalloc_flex(*acl, data, size, GFP_KERNEL); 2051 + acl = kmalloc_flex(*acl, data, size); 2052 2052 if (!acl) 2053 2053 return -ENOMEM; 2054 2054 yacl->acl = acl; ··· 2080 2080 size = round_up(size, 4); 2081 2081 2082 2082 if (yacl->flags & YFS_ACL_WANT_VOL_ACL) { 2083 - acl = kmalloc_flex(*acl, data, size, GFP_KERNEL); 2083 + acl = kmalloc_flex(*acl, data, size); 2084 2084 if (!acl) 2085 2085 return -ENOMEM; 2086 2086 yacl->vol_acl = acl;
+1 -1
fs/aio.c
··· 693 693 new_nr = (table ? table->nr : 1) * 4; 694 694 spin_unlock(&mm->ioctx_lock); 695 695 696 - table = kzalloc_flex(*table, table, new_nr, GFP_KERNEL); 696 + table = kzalloc_flex(*table, table, new_nr); 697 697 if (!table) 698 698 return -ENOMEM; 699 699
+1 -1
fs/binfmt_elf.c
··· 1880 1880 1881 1881 info->thread->task = dump_task; 1882 1882 for (ct = dump_task->signal->core_state->dumper.next; ct; ct = ct->next) { 1883 - t = kzalloc_flex(*t, notes, info->thread_notes, GFP_KERNEL); 1883 + t = kzalloc_flex(*t, notes, info->thread_notes); 1884 1884 if (unlikely(!t)) 1885 1885 return 0; 1886 1886
+1 -1
fs/binfmt_elf_fdpic.c
··· 761 761 if (nloads == 0) 762 762 return -ELIBBAD; 763 763 764 - loadmap = kzalloc_flex(*loadmap, segs, nloads, GFP_KERNEL); 764 + loadmap = kzalloc_flex(*loadmap, segs, nloads); 765 765 if (!loadmap) 766 766 return -ENOMEM; 767 767
+1 -1
fs/btrfs/inode.c
··· 1692 1692 const blk_opf_t write_flags = wbc_to_write_flags(wbc); 1693 1693 1694 1694 nofs_flag = memalloc_nofs_save(); 1695 - ctx = kvmalloc_flex(*ctx, chunks, num_chunks, GFP_KERNEL); 1695 + ctx = kvmalloc_flex(*ctx, chunks, num_chunks); 1696 1696 memalloc_nofs_restore(nofs_flag); 1697 1697 if (!ctx) 1698 1698 return false;
+1 -1
fs/btrfs/raid56.c
··· 208 208 * Try harder to allocate and fallback to vmalloc to lower the chance 209 209 * of a failing mount. 210 210 */ 211 - table = kvzalloc_flex(*table, table, num_entries, GFP_KERNEL); 211 + table = kvzalloc_flex(*table, table, num_entries); 212 212 if (!table) 213 213 return -ENOMEM; 214 214
+1 -1
fs/ext4/dir.c
··· 480 480 p = &info->root.rb_node; 481 481 482 482 /* Create and allocate the fname structure */ 483 - new_fn = kzalloc_flex(*new_fn, name, ent_name->len + 1, GFP_KERNEL); 483 + new_fn = kzalloc_flex(*new_fn, name, ent_name->len + 1); 484 484 if (!new_fn) 485 485 return -ENOMEM; 486 486 new_fn->hash = hash;
+1 -1
fs/fs-writeback.c
··· 724 724 int nr; 725 725 bool restart = false; 726 726 727 - isw = kzalloc_flex(*isw, inodes, WB_MAX_INODES_PER_ISW, GFP_KERNEL); 727 + isw = kzalloc_flex(*isw, inodes, WB_MAX_INODES_PER_ISW); 728 728 if (!isw) 729 729 return restart; 730 730
+1 -1
fs/jffs2/acl.c
··· 133 133 size_t i; 134 134 135 135 *size = jffs2_acl_size(acl->a_count); 136 - header = kmalloc_flex(*header, a_entries, acl->a_count, GFP_KERNEL); 136 + header = kmalloc_flex(*header, a_entries, acl->a_count); 137 137 if (!header) 138 138 return ERR_PTR(-ENOMEM); 139 139 header->a_version = cpu_to_je32(JFFS2_ACL_VERSION);
+1 -1
fs/netfs/fscache_volume.c
··· 230 230 if (IS_ERR(cache)) 231 231 return NULL; 232 232 233 - volume = kzalloc_flex(*volume, coherency, coherency_len, GFP_KERNEL); 233 + volume = kzalloc_flex(*volume, coherency, coherency_len); 234 234 if (!volume) 235 235 goto err_cache; 236 236
+3 -3
fs/nfsd/blocklayout.c
··· 131 131 * layouts, so make sure to zero the whole structure. 132 132 */ 133 133 nfserr = nfserrno(-ENOMEM); 134 - bl = kzalloc_flex(*bl, extents, nr_extents_max, GFP_KERNEL); 134 + bl = kzalloc_flex(*bl, extents, nr_extents_max); 135 135 if (!bl) 136 136 goto out_error; 137 137 bl->nr_extents = nr_extents_max; ··· 208 208 struct pnfs_block_deviceaddr *dev; 209 209 struct pnfs_block_volume *b; 210 210 211 - dev = kzalloc_flex(*dev, volumes, 1, GFP_KERNEL); 211 + dev = kzalloc_flex(*dev, volumes, 1); 212 212 if (!dev) 213 213 return -ENOMEM; 214 214 gdp->gd_device = dev; ··· 319 319 const struct pr_ops *ops; 320 320 int ret; 321 321 322 - dev = kzalloc_flex(*dev, volumes, 1, GFP_KERNEL); 322 + dev = kzalloc_flex(*dev, volumes, 1); 323 323 if (!dev) 324 324 return -ENOMEM; 325 325 gdp->gd_device = dev;
+1 -1
fs/nfsd/nfs4xdr.c
··· 124 124 { 125 125 struct svcxdr_tmpbuf *tb; 126 126 127 - tb = kmalloc_flex(*tb, buf, len, GFP_KERNEL); 127 + tb = kmalloc_flex(*tb, buf, len); 128 128 if (!tb) 129 129 return NULL; 130 130 tb->next = argp->to_free;
+1 -1
fs/ocfs2/journal.c
··· 177 177 osb->recovery_thread_task = NULL; 178 178 init_waitqueue_head(&osb->recovery_event); 179 179 180 - rm = kzalloc_flex(*rm, rm_entries, osb->max_slots, GFP_KERNEL); 180 + rm = kzalloc_flex(*rm, rm_entries, osb->max_slots); 181 181 if (!rm) { 182 182 mlog_errno(-ENOMEM); 183 183 return -ENOMEM;
+1 -1
fs/ocfs2/slot_map.c
··· 425 425 struct inode *inode = NULL; 426 426 struct ocfs2_slot_info *si; 427 427 428 - si = kzalloc_flex(*si, si_slots, osb->max_slots, GFP_KERNEL); 428 + si = kzalloc_flex(*si, si_slots, osb->max_slots); 429 429 if (!si) { 430 430 status = -ENOMEM; 431 431 mlog_errno(status);
+1 -1
fs/overlayfs/readdir.c
··· 180 180 { 181 181 struct ovl_cache_entry *p; 182 182 183 - p = kmalloc_flex(*p, name, len + 1, GFP_KERNEL); 183 + p = kmalloc_flex(*p, name, len + 1); 184 184 if (!p) 185 185 return NULL; 186 186
+1 -1
fs/overlayfs/util.c
··· 143 143 { 144 144 struct ovl_entry *oe; 145 145 146 - oe = kzalloc_flex(*oe, __lowerstack, numlower, GFP_KERNEL); 146 + oe = kzalloc_flex(*oe, __lowerstack, numlower); 147 147 if (oe) 148 148 oe->__numlower = numlower; 149 149
+1 -1
fs/smb/client/smb2ops.c
··· 1888 1888 goto out; 1889 1889 } 1890 1890 1891 - cc_req = kzalloc_flex(*cc_req, Chunks, chunk_count, GFP_KERNEL); 1891 + cc_req = kzalloc_flex(*cc_req, Chunks, chunk_count); 1892 1892 if (!cc_req) { 1893 1893 rc = -ENOMEM; 1894 1894 goto out;
+1 -1
fs/udf/super.c
··· 1047 1047 struct udf_bitmap *bitmap; 1048 1048 int nr_groups = udf_compute_nr_groups(sb, index); 1049 1049 1050 - bitmap = kvzalloc_flex(*bitmap, s_block_bitmap, nr_groups, GFP_KERNEL); 1050 + bitmap = kvzalloc_flex(*bitmap, s_block_bitmap, nr_groups); 1051 1051 if (!bitmap) 1052 1052 return NULL; 1053 1053
+1 -1
fs/xfs/xfs_buf_mem.c
··· 58 58 struct xfs_buftarg *btp; 59 59 int error; 60 60 61 - btp = kzalloc_flex(*btp, bt_cache, 1, GFP_KERNEL); 61 + btp = kzalloc_flex(*btp, bt_cache, 1); 62 62 if (!btp) 63 63 return -ENOMEM; 64 64
+1 -1
init/initramfs.c
··· 153 153 { 154 154 struct dir_entry *de; 155 155 156 - de = kmalloc_flex(*de, name, nlen, GFP_KERNEL); 156 + de = kmalloc_flex(*de, name, nlen); 157 157 if (!de) 158 158 panic_show_mem("can't allocate dir_entry buffer"); 159 159 INIT_LIST_HEAD(&de->list);
+1 -1
io_uring/rsrc.c
··· 113 113 { 114 114 if (nr_bvecs <= IO_CACHED_BVECS_SEGS) 115 115 return io_cache_alloc(&ctx->imu_cache, GFP_KERNEL); 116 - return kvmalloc_flex(struct io_mapped_ubuf, bvec, nr_bvecs, GFP_KERNEL); 116 + return kvmalloc_flex(struct io_mapped_ubuf, bvec, nr_bvecs); 117 117 } 118 118 119 119 static void io_free_imu(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
+1 -1
kernel/audit.c
··· 1517 1517 if (err < 0) 1518 1518 return err; 1519 1519 } 1520 - sig_data = kmalloc_flex(*sig_data, ctx, lsmctx.len, GFP_KERNEL); 1520 + sig_data = kmalloc_flex(*sig_data, ctx, lsmctx.len); 1521 1521 if (!sig_data) { 1522 1522 if (lsmprop_is_set(&audit_sig_lsm)) 1523 1523 security_release_secctx(&lsmctx);
+2 -2
kernel/audit_tree.c
··· 96 96 size_t sz; 97 97 98 98 sz = strlen(s) + 1; 99 - tree = kmalloc_flex(*tree, pathname, sz, GFP_KERNEL); 99 + tree = kmalloc_flex(*tree, pathname, sz); 100 100 if (tree) { 101 101 refcount_set(&tree->count, 1); 102 102 tree->goner = 0; ··· 192 192 struct audit_chunk *chunk; 193 193 int i; 194 194 195 - chunk = kzalloc_flex(*chunk, owners, count, GFP_KERNEL); 195 + chunk = kzalloc_flex(*chunk, owners, count); 196 196 if (!chunk) 197 197 return NULL; 198 198
+1 -1
kernel/auditfilter.c
··· 638 638 void *bufp; 639 639 int i; 640 640 641 - data = kzalloc_flex(*data, buf, krule->buflen, GFP_KERNEL); 641 + data = kzalloc_flex(*data, buf, krule->buflen); 642 642 if (unlikely(!data)) 643 643 return NULL; 644 644
+1 -1
kernel/bpf/btf.c
··· 9617 9617 9618 9618 tab = btf->struct_ops_tab; 9619 9619 if (!tab) { 9620 - tab = kzalloc_flex(*tab, ops, 4, GFP_KERNEL); 9620 + tab = kzalloc_flex(*tab, ops, 4); 9621 9621 if (!tab) 9622 9622 return -ENOMEM; 9623 9623 tab->capacity = 4;
+1 -1
kernel/cgroup/cgroup.c
··· 5844 5844 int ret; 5845 5845 5846 5846 /* allocate the cgroup and its ID, 0 is reserved for the root */ 5847 - cgrp = kzalloc_flex(*cgrp, _low_ancestors, level, GFP_KERNEL); 5847 + cgrp = kzalloc_flex(*cgrp, _low_ancestors, level); 5848 5848 if (!cgrp) 5849 5849 return ERR_PTR(-ENOMEM); 5850 5850
+1 -1
kernel/gcov/fs.c
··· 116 116 /* Dry-run to get the actual buffer size. */ 117 117 size = convert_to_gcda(NULL, info); 118 118 119 - iter = kvmalloc_flex(*iter, buffer, size, GFP_KERNEL); 119 + iter = kvmalloc_flex(*iter, buffer, size); 120 120 if (!iter) 121 121 return NULL; 122 122
+1 -1
kernel/irq/generic-chip.c
··· 240 240 { 241 241 struct irq_chip_generic *gc; 242 242 243 - gc = kzalloc_flex(*gc, chip_types, num_ct, GFP_KERNEL); 243 + gc = kzalloc_flex(*gc, chip_types, num_ct); 244 244 if (gc) { 245 245 irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base, 246 246 handler);
+1 -1
kernel/irq/matrix.c
··· 51 51 unsigned int cpu, matrix_size = BITS_TO_LONGS(matrix_bits); 52 52 struct irq_matrix *m; 53 53 54 - m = kzalloc_flex(*m, scratch_map, matrix_size * 2, GFP_KERNEL); 54 + m = kzalloc_flex(*m, scratch_map, matrix_size * 2); 55 55 if (!m) 56 56 return NULL; 57 57
+1 -1
kernel/kprobes.c
··· 172 172 } while (c->nr_garbage && collect_garbage_slots(c) == 0); 173 173 174 174 /* All out of space. Need to allocate a new page. */ 175 - kip = kmalloc_flex(*kip, slot_used, slots_per_page(c), GFP_KERNEL); 175 + kip = kmalloc_flex(*kip, slot_used, slots_per_page(c)); 176 176 if (!kip) 177 177 return NULL; 178 178
+2 -2
kernel/module/sysfs.c
··· 74 74 for (i = 0; i < info->hdr->e_shnum; i++) 75 75 if (!sect_empty(&info->sechdrs[i])) 76 76 nloaded++; 77 - sect_attrs = kzalloc_flex(*sect_attrs, attrs, nloaded, GFP_KERNEL); 77 + sect_attrs = kzalloc_flex(*sect_attrs, attrs, nloaded); 78 78 if (!sect_attrs) 79 79 return -ENOMEM; 80 80 ··· 166 166 if (notes == 0) 167 167 return 0; 168 168 169 - notes_attrs = kzalloc_flex(*notes_attrs, attrs, notes, GFP_KERNEL); 169 + notes_attrs = kzalloc_flex(*notes_attrs, attrs, notes); 170 170 if (!notes_attrs) 171 171 return -ENOMEM; 172 172
+1 -1
kernel/trace/fprobe.c
··· 749 749 return -E2BIG; 750 750 fp->entry_data_size = size; 751 751 752 - hlist_array = kzalloc_flex(*hlist_array, array, num, GFP_KERNEL); 752 + hlist_array = kzalloc_flex(*hlist_array, array, num); 753 753 if (!hlist_array) 754 754 return -ENOMEM; 755 755
+1 -1
kernel/trace/trace_eprobe.c
··· 211 211 sys_name = event->class->system; 212 212 event_name = trace_event_name(event); 213 213 214 - ep = kzalloc_flex(*ep, tp.args, nargs, GFP_KERNEL); 214 + ep = kzalloc_flex(*ep, tp.args, nargs); 215 215 if (!ep) { 216 216 trace_event_put_ref(event); 217 217 return ERR_PTR(-ENOMEM);
+1 -1
kernel/trace/trace_fprobe.c
··· 579 579 struct trace_fprobe *tf __free(free_trace_fprobe) = NULL; 580 580 int ret = -ENOMEM; 581 581 582 - tf = kzalloc_flex(*tf, tp.args, nargs, GFP_KERNEL); 582 + tf = kzalloc_flex(*tf, tp.args, nargs); 583 583 if (!tf) 584 584 return ERR_PTR(ret); 585 585
+1 -1
kernel/trace/trace_kprobe.c
··· 275 275 struct trace_kprobe *tk __free(free_trace_kprobe) = NULL; 276 276 int ret = -ENOMEM; 277 277 278 - tk = kzalloc_flex(*tk, tp.args, nargs, GFP_KERNEL); 278 + tk = kzalloc_flex(*tk, tp.args, nargs); 279 279 if (!tk) 280 280 return ERR_PTR(ret); 281 281
+1 -1
kernel/trace/trace_uprobe.c
··· 338 338 struct trace_uprobe *tu; 339 339 int ret; 340 340 341 - tu = kzalloc_flex(*tu, tp.args, nargs, GFP_KERNEL); 341 + tu = kzalloc_flex(*tu, tp.args, nargs); 342 342 if (!tu) 343 343 return ERR_PTR(-ENOMEM); 344 344
+1 -1
kernel/tracepoint.c
··· 103 103 104 104 static inline void *allocate_probes(int count) 105 105 { 106 - struct tp_probes *p = kmalloc_flex(*p, probes, count, GFP_KERNEL); 106 + struct tp_probes *p = kmalloc_flex(*p, probes, count); 107 107 return p == NULL ? NULL : p->probes; 108 108 } 109 109
+1 -1
kernel/watch_queue.c
··· 358 358 * user-specified filters. 359 359 */ 360 360 ret = -ENOMEM; 361 - wfilter = kzalloc_flex(*wfilter, filters, nr_filter, GFP_KERNEL); 361 + wfilter = kzalloc_flex(*wfilter, filters, nr_filter); 362 362 if (!wfilter) 363 363 goto err_filter; 364 364 wfilter->nr_filters = nr_filter;
+1 -1
kernel/workqueue.c
··· 5370 5370 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 5371 5371 return ERR_PTR(-EINVAL); 5372 5372 5373 - ctx = kzalloc_flex(*ctx, pwq_tbl, nr_cpu_ids, GFP_KERNEL); 5373 + ctx = kzalloc_flex(*ctx, pwq_tbl, nr_cpu_ids); 5374 5374 5375 5375 new_attrs = alloc_workqueue_attrs(); 5376 5376 if (!ctx || !new_attrs)
+4 -4
lib/assoc_array.c
··· 741 741 keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE); 742 742 keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT; 743 743 744 - new_s0 = kzalloc_flex(*new_s0, index_key, keylen, GFP_KERNEL); 744 + new_s0 = kzalloc_flex(*new_s0, index_key, keylen); 745 745 if (!new_s0) 746 746 return false; 747 747 edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0); ··· 848 848 keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE); 849 849 keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT; 850 850 851 - new_s0 = kzalloc_flex(*new_s0, index_key, keylen, GFP_KERNEL); 851 + new_s0 = kzalloc_flex(*new_s0, index_key, keylen); 852 852 if (!new_s0) 853 853 return false; 854 854 edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0); ··· 897 897 keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE); 898 898 keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT; 899 899 900 - new_s1 = kzalloc_flex(*new_s1, index_key, keylen, GFP_KERNEL); 900 + new_s1 = kzalloc_flex(*new_s1, index_key, keylen); 901 901 if (!new_s1) 902 902 return false; 903 903 edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1); ··· 1489 1489 shortcut = assoc_array_ptr_to_shortcut(cursor); 1490 1490 keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE); 1491 1491 keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT; 1492 - new_s = kmalloc_flex(*new_s, index_key, keylen, GFP_KERNEL); 1492 + new_s = kmalloc_flex(*new_s, index_key, keylen); 1493 1493 if (!new_s) 1494 1494 goto enomem; 1495 1495 pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
+1 -1
lib/test_bpf.c
··· 15461 15461 int which, err; 15462 15462 15463 15463 /* Allocate the table of programs to be used for tail calls */ 15464 - progs = kzalloc_flex(*progs, ptrs, ntests + 1, GFP_KERNEL); 15464 + progs = kzalloc_flex(*progs, ptrs, ntests + 1); 15465 15465 if (!progs) 15466 15466 goto out_nomem; 15467 15467
+1 -1
mm/hugetlb_cgroup.c
··· 139 139 struct hugetlb_cgroup *h_cgroup; 140 140 int node; 141 141 142 - h_cgroup = kzalloc_flex(*h_cgroup, nodeinfo, nr_node_ids, GFP_KERNEL); 142 + h_cgroup = kzalloc_flex(*h_cgroup, nodeinfo, nr_node_ids); 143 143 144 144 if (!h_cgroup) 145 145 return ERR_PTR(-ENOMEM);
+1 -1
mm/madvise.c
··· 91 91 92 92 /* Add 1 for NUL terminator at the end of the anon_name->name */ 93 93 count = strlen(name) + 1; 94 - anon_name = kmalloc_flex(*anon_name, name, count, GFP_KERNEL); 94 + anon_name = kmalloc_flex(*anon_name, name, count); 95 95 if (anon_name) { 96 96 kref_init(&anon_name->kref); 97 97 memcpy(anon_name->name, name, count);
+1 -1
mm/mempolicy.c
··· 3880 3880 { 3881 3881 int nid, err; 3882 3882 3883 - wi_group = kzalloc_flex(*wi_group, nattrs, nr_node_ids, GFP_KERNEL); 3883 + wi_group = kzalloc_flex(*wi_group, nattrs, nr_node_ids); 3884 3884 if (!wi_group) 3885 3885 return -ENOMEM; 3886 3886 mutex_init(&wi_group->kobj_lock);
+1 -1
net/bluetooth/hci_core.c
··· 797 797 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 798 798 return -EINVAL; 799 799 800 - dl = kzalloc_flex(*dl, dev_req, dev_num, GFP_KERNEL); 800 + dl = kzalloc_flex(*dl, dev_req, dev_num); 801 801 if (!dl) 802 802 return -ENOMEM; 803 803
+1 -1
net/bluetooth/mgmt.c
··· 3360 3360 i++; 3361 3361 } 3362 3362 3363 - rp = kmalloc_flex(*rp, addr, i, GFP_KERNEL); 3363 + rp = kmalloc_flex(*rp, addr, i); 3364 3364 if (!rp) { 3365 3365 err = -ENOMEM; 3366 3366 goto unlock;
+1 -1
net/bluetooth/rfcomm/tty.c
··· 510 510 if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di)) 511 511 return -EINVAL; 512 512 513 - dl = kzalloc_flex(*dl, dev_info, dev_num, GFP_KERNEL); 513 + dl = kzalloc_flex(*dl, dev_info, dev_num); 514 514 if (!dl) 515 515 return -ENOMEM; 516 516
+1 -1
net/core/bpf_sk_storage.c
··· 500 500 nr_maps++; 501 501 } 502 502 503 - diag = kzalloc_flex(*diag, maps, nr_maps, GFP_KERNEL); 503 + diag = kzalloc_flex(*diag, maps, nr_maps); 504 504 if (!diag) 505 505 return ERR_PTR(-ENOMEM); 506 506
+1 -1
net/core/dev.c
··· 6510 6510 6511 6511 static struct flush_backlogs *flush_backlogs_alloc(void) 6512 6512 { 6513 - return kmalloc_flex(struct flush_backlogs, w, nr_cpu_ids, GFP_KERNEL); 6513 + return kmalloc_flex(struct flush_backlogs, w, nr_cpu_ids); 6514 6514 } 6515 6515 6516 6516 static struct flush_backlogs *flush_backlogs_fallback;
+1 -1
net/core/flow_offload.c
··· 12 12 struct flow_rule *rule; 13 13 int i; 14 14 15 - rule = kzalloc_flex(*rule, action.entries, num_actions, GFP_KERNEL); 15 + rule = kzalloc_flex(*rule, action.entries, num_actions); 16 16 if (!rule) 17 17 return NULL; 18 18
+1 -1
net/devlink/core.c
··· 418 418 if (!devlink_reload_actions_valid(ops)) 419 419 return NULL; 420 420 421 - devlink = kvzalloc_flex(*devlink, priv, priv_size, GFP_KERNEL); 421 + devlink = kvzalloc_flex(*devlink, priv, priv_size); 422 422 if (!devlink) 423 423 return NULL; 424 424
+2 -2
net/ethtool/common.c
··· 687 687 if (rule_cnt <= 0) 688 688 return -EINVAL; 689 689 690 - info = kvzalloc_flex(*info, rule_locs, rule_cnt, GFP_KERNEL); 690 + info = kvzalloc_flex(*info, rule_locs, rule_cnt); 691 691 if (!info) 692 692 return -ENOMEM; 693 693 ··· 841 841 if (rule_cnt < 0) 842 842 return -EINVAL; 843 843 844 - info = kvzalloc_flex(*info, rule_locs, rule_cnt, GFP_KERNEL); 844 + info = kvzalloc_flex(*info, rule_locs, rule_cnt); 845 845 if (!info) 846 846 return -ENOMEM; 847 847
+1 -1
net/ipv4/fib_semantics.c
··· 1399 1399 1400 1400 fib_info_hash_grow(net); 1401 1401 1402 - fi = kzalloc_flex(*fi, fib_nh, nhs, GFP_KERNEL); 1402 + fi = kzalloc_flex(*fi, fib_nh, nhs); 1403 1403 if (!fi) { 1404 1404 err = -ENOBUFS; 1405 1405 goto failure;
+1 -1
net/ipv4/nexthop.c
··· 549 549 { 550 550 struct nh_group *nhg; 551 551 552 - nhg = kzalloc_flex(*nhg, nh_entries, num_nh, GFP_KERNEL); 552 + nhg = kzalloc_flex(*nhg, nh_entries, num_nh); 553 553 if (nhg) 554 554 nhg->num_nh = num_nh; 555 555
+1 -1
net/ipv4/udp_tunnel_nic.c
··· 753 753 struct udp_tunnel_nic *utn; 754 754 unsigned int i; 755 755 756 - utn = kzalloc_flex(*utn, entries, n_tables, GFP_KERNEL); 756 + utn = kzalloc_flex(*utn, entries, n_tables); 757 757 if (!utn) 758 758 return NULL; 759 759 utn->n_tables = n_tables;
+1 -1
net/netfilter/nft_set_pipapo.c
··· 2236 2236 if (field_count > NFT_PIPAPO_MAX_FIELDS) 2237 2237 return -EINVAL; 2238 2238 2239 - m = kmalloc_flex(*m, f, field_count, GFP_KERNEL); 2239 + m = kmalloc_flex(*m, f, field_count); 2240 2240 if (!m) 2241 2241 return -ENOMEM; 2242 2242
+1 -1
net/netfilter/xt_hashlimit.c
··· 293 293 if (size < 16) 294 294 size = 16; 295 295 } 296 - hinfo = kvmalloc_flex(*hinfo, hash, size, GFP_KERNEL); 296 + hinfo = kvmalloc_flex(*hinfo, hash, size); 297 297 if (hinfo == NULL) 298 298 return -ENOMEM; 299 299 *out_hinfo = hinfo;
+1 -1
net/netfilter/xt_recent.c
··· 391 391 goto out; 392 392 } 393 393 394 - t = kvzalloc_flex(*t, iphash, ip_list_hash_size, GFP_KERNEL); 394 + t = kvzalloc_flex(*t, iphash, ip_list_hash_size); 395 395 if (t == NULL) { 396 396 ret = -ENOMEM; 397 397 goto out;
+1 -1
net/openvswitch/meter.c
··· 69 69 { 70 70 struct dp_meter_instance *ti; 71 71 72 - ti = kvzalloc_flex(*ti, dp_meters, size, GFP_KERNEL); 72 + ti = kvzalloc_flex(*ti, dp_meters, size); 73 73 if (!ti) 74 74 return NULL; 75 75
+5 -5
net/sched/cls_u32.c
··· 364 364 void *key = tc_u_common_ptr(tp); 365 365 struct tc_u_common *tp_c = tc_u_common_find(key); 366 366 367 - root_ht = kzalloc_flex(*root_ht, ht, 1, GFP_KERNEL); 367 + root_ht = kzalloc_flex(*root_ht, ht, 1); 368 368 if (root_ht == NULL) 369 369 return -ENOBUFS; 370 370 ··· 825 825 struct tc_u32_sel *s = &n->sel; 826 826 struct tc_u_knode *new; 827 827 828 - new = kzalloc_flex(*new, sel.keys, s->nkeys, GFP_KERNEL); 828 + new = kzalloc_flex(*new, sel.keys, s->nkeys); 829 829 if (!new) 830 830 return NULL; 831 831 ··· 974 974 NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table"); 975 975 return -EINVAL; 976 976 } 977 - ht = kzalloc_flex(*ht, ht, divisor + 1, GFP_KERNEL); 977 + ht = kzalloc_flex(*ht, ht, divisor + 1); 978 978 if (ht == NULL) 979 979 return -ENOBUFS; 980 980 if (handle == 0) { ··· 1104 1104 goto erridr; 1105 1105 } 1106 1106 1107 - n = kzalloc_flex(*n, sel.keys, s->nkeys, GFP_KERNEL); 1107 + n = kzalloc_flex(*n, sel.keys, s->nkeys); 1108 1108 if (n == NULL) { 1109 1109 err = -ENOBUFS; 1110 1110 goto erridr; ··· 1417 1417 goto nla_put_failure; 1418 1418 } 1419 1419 #ifdef CONFIG_CLS_U32_PERF 1420 - gpf = kzalloc_flex(*gpf, kcnts, n->sel.nkeys, GFP_KERNEL); 1420 + gpf = kzalloc_flex(*gpf, kcnts, n->sel.nkeys); 1421 1421 if (!gpf) 1422 1422 goto nla_put_failure; 1423 1423
+1 -1
net/sched/sch_api.c
··· 530 530 return ERR_PTR(-EINVAL); 531 531 } 532 532 533 - stab = kmalloc_flex(*stab, data, tsize, GFP_KERNEL); 533 + stab = kmalloc_flex(*stab, data, tsize); 534 534 if (!stab) 535 535 return ERR_PTR(-ENOMEM); 536 536
+1 -1
net/sched/sch_netem.c
··· 814 814 if (!n || n > NETEM_DIST_MAX) 815 815 return -EINVAL; 816 816 817 - d = kvmalloc_flex(*d, table, n, GFP_KERNEL); 817 + d = kvmalloc_flex(*d, table, n); 818 818 if (!d) 819 819 return -ENOMEM; 820 820
+1 -1
net/sunrpc/svcsock.c
··· 1436 1436 return ERR_PTR(sendpages); 1437 1437 1438 1438 pages = svc_serv_maxpages(serv); 1439 - svsk = kzalloc_flex(*svsk, sk_pages, pages, GFP_KERNEL); 1439 + svsk = kzalloc_flex(*svsk, sk_pages, pages); 1440 1440 if (!svsk) 1441 1441 return ERR_PTR(-ENOMEM); 1442 1442
+1 -1
net/sunrpc/xprtrdma/svc_rdma_pcl.c
··· 29 29 { 30 30 struct svc_rdma_chunk *chunk; 31 31 32 - chunk = kmalloc_flex(*chunk, ch_segments, segcount, GFP_KERNEL); 32 + chunk = kmalloc_flex(*chunk, ch_segments, segcount); 33 33 if (!chunk) 34 34 return NULL; 35 35
+7 -7
net/wireless/nl80211.c
··· 5333 5333 if (n_entries > wiphy->max_acl_mac_addrs) 5334 5334 return ERR_PTR(-EOPNOTSUPP); 5335 5335 5336 - acl = kzalloc_flex(*acl, mac_addrs, n_entries, GFP_KERNEL); 5336 + acl = kzalloc_flex(*acl, mac_addrs, n_entries); 5337 5337 if (!acl) 5338 5338 return ERR_PTR(-ENOMEM); 5339 5339 acl->n_acl_entries = n_entries; ··· 6113 6113 num_elems++; 6114 6114 } 6115 6115 6116 - elems = kzalloc_flex(*elems, elem, num_elems, GFP_KERNEL); 6116 + elems = kzalloc_flex(*elems, elem, num_elems); 6117 6117 if (!elems) 6118 6118 return ERR_PTR(-ENOMEM); 6119 6119 elems->cnt = num_elems; ··· 6145 6145 num_elems++; 6146 6146 } 6147 6147 6148 - elems = kzalloc_flex(*elems, elem, num_elems, GFP_KERNEL); 6148 + elems = kzalloc_flex(*elems, elem, num_elems); 6149 6149 if (!elems) 6150 6150 return ERR_PTR(-ENOMEM); 6151 6151 elems->cnt = num_elems; ··· 10157 10157 goto out; 10158 10158 } 10159 10159 10160 - rd = kzalloc_flex(*rd, reg_rules, num_rules, GFP_KERNEL); 10160 + rd = kzalloc_flex(*rd, reg_rules, num_rules); 10161 10161 if (!rd) { 10162 10162 r = -ENOMEM; 10163 10163 goto out; ··· 15378 15378 if (n_rules > coalesce->n_rules) 15379 15379 return -EINVAL; 15380 15380 15381 - new_coalesce = kzalloc_flex(*new_coalesce, rules, n_rules, GFP_KERNEL); 15381 + new_coalesce = kzalloc_flex(*new_coalesce, rules, n_rules); 15382 15382 if (!new_coalesce) 15383 15383 return -ENOMEM; 15384 15384 ··· 17462 17462 rem_conf) 17463 17463 num_conf++; 17464 17464 17465 - tid_config = kzalloc_flex(*tid_config, tid_conf, num_conf, GFP_KERNEL); 17465 + tid_config = kzalloc_flex(*tid_config, tid_conf, num_conf); 17466 17466 if (!tid_config) 17467 17467 return -ENOMEM; 17468 17468 ··· 18256 18256 if (specs > rdev->wiphy.sar_capa->num_freq_ranges) 18257 18257 return -EINVAL; 18258 18258 18259 - sar_spec = kzalloc_flex(*sar_spec, sub_specs, specs, GFP_KERNEL); 18259 + sar_spec = kzalloc_flex(*sar_spec, sub_specs, specs); 18260 18260 if (!sar_spec) 18261 18261 return -ENOMEM; 18262 18262
+1 -1
net/wireless/pmsr.c
··· 312 312 } 313 313 } 314 314 315 - req = kzalloc_flex(*req, peers, count, GFP_KERNEL); 315 + req = kzalloc_flex(*req, peers, count); 316 316 if (!req) 317 317 return -ENOMEM; 318 318 req->n_peers = count;
+3 -3
net/wireless/reg.c
··· 452 452 struct ieee80211_regdomain *regd; 453 453 unsigned int i; 454 454 455 - regd = kzalloc_flex(*regd, reg_rules, src_regd->n_reg_rules, GFP_KERNEL); 455 + regd = kzalloc_flex(*regd, reg_rules, src_regd->n_reg_rules); 456 456 if (!regd) 457 457 return ERR_PTR(-ENOMEM); 458 458 ··· 932 932 struct ieee80211_regdomain *regdom; 933 933 unsigned int i; 934 934 935 - regdom = kzalloc_flex(*regdom, reg_rules, coll->n_rules, GFP_KERNEL); 935 + regdom = kzalloc_flex(*regdom, reg_rules, coll->n_rules); 936 936 if (!regdom) 937 937 return -ENOMEM; 938 938 ··· 1530 1530 if (!num_rules) 1531 1531 return NULL; 1532 1532 1533 - rd = kzalloc_flex(*rd, reg_rules, num_rules, GFP_KERNEL); 1533 + rd = kzalloc_flex(*rd, reg_rules, num_rules); 1534 1534 if (!rd) 1535 1535 return NULL; 1536 1536
+1 -1
net/wireless/scan.c
··· 1085 1085 if (!n_channels) 1086 1086 return cfg80211_scan_6ghz(rdev, true); 1087 1087 1088 - request = kzalloc_flex(*request, req.channels, n_channels, GFP_KERNEL); 1088 + request = kzalloc_flex(*request, req.channels, n_channels); 1089 1089 if (!request) 1090 1090 return -ENOMEM; 1091 1091
+1 -1
net/xdp/xsk_buff_pool.c
··· 59 59 u32 i, entries; 60 60 61 61 entries = unaligned ? umem->chunks : 0; 62 - pool = kvzalloc_flex(*pool, free_heads, entries, GFP_KERNEL); 62 + pool = kvzalloc_flex(*pool, free_heads, entries); 63 63 if (!pool) 64 64 goto out; 65 65
+1 -1
security/integrity/ima/ima_modsig.c
··· 65 65 buf_len -= sig_len + sizeof(*sig); 66 66 67 67 /* Allocate sig_len additional bytes to hold the raw PKCS#7 data. */ 68 - hdr = kzalloc_flex(*hdr, raw_pkcs7, sig_len, GFP_KERNEL); 68 + hdr = kzalloc_flex(*hdr, raw_pkcs7, sig_len); 69 69 if (!hdr) 70 70 return -ENOMEM; 71 71
+1 -1
security/integrity/ima/ima_policy.c
··· 342 342 return ERR_PTR(-EINVAL); 343 343 } 344 344 345 - opt_list = kzalloc_flex(*opt_list, items, count, GFP_KERNEL); 345 + opt_list = kzalloc_flex(*opt_list, items, count); 346 346 if (!opt_list) { 347 347 kfree(src_copy); 348 348 return ERR_PTR(-ENOMEM);
+1 -1
security/landlock/domain.c
··· 95 95 * caller. 96 96 */ 97 97 details = 98 - kzalloc_flex(*details, exe_path, path_size, GFP_KERNEL); 98 + kzalloc_flex(*details, exe_path, path_size); 99 99 if (!details) 100 100 return ERR_PTR(-ENOMEM); 101 101
+1 -1
security/loadpin/loadpin.c
··· 327 327 328 328 len /= 2; 329 329 330 - trd = kzalloc_flex(*trd, data, len, GFP_KERNEL); 330 + trd = kzalloc_flex(*trd, data, len); 331 331 if (!trd) { 332 332 rc = -ENOMEM; 333 333 goto err;
+1 -1
sound/core/control.c
··· 233 233 if (count == 0 || count > MAX_CONTROL_COUNT) 234 234 return -EINVAL; 235 235 236 - *kctl = kzalloc_flex(**kctl, vd, count, GFP_KERNEL); 236 + *kctl = kzalloc_flex(**kctl, vd, count); 237 237 if (!*kctl) 238 238 return -ENOMEM; 239 239
+1 -1
sound/core/vmaster.c
··· 256 256 struct link_master *master_link = snd_kcontrol_chip(master); 257 257 struct link_follower *srec; 258 258 259 - srec = kzalloc_flex(*srec, follower.vd, follower->count, GFP_KERNEL); 259 + srec = kzalloc_flex(*srec, follower.vd, follower->count); 260 260 if (!srec) 261 261 return -ENOMEM; 262 262 srec->kctl = follower;
+1 -1
sound/hda/common/codec.c
··· 117 117 { 118 118 struct hda_conn_list *p; 119 119 120 - p = kmalloc_flex(*p, conns, len, GFP_KERNEL); 120 + p = kmalloc_flex(*p, conns, len); 121 121 if (!p) 122 122 return -ENOMEM; 123 123 p->len = len;
+2 -2
sound/soc/codecs/sigmadsp.c
··· 270 270 271 271 length -= sizeof(*data_chunk); 272 272 273 - data = kzalloc_flex(*data, data, length, GFP_KERNEL); 273 + data = kzalloc_flex(*data, data, length); 274 274 if (!data) 275 275 return -ENOMEM; 276 276 ··· 413 413 if (len < 3) 414 414 return -EINVAL; 415 415 416 - data = kzalloc_flex(*data, data, size_sub(len, 2), GFP_KERNEL); 416 + data = kzalloc_flex(*data, data, size_sub(len, 2)); 417 417 if (!data) 418 418 return -ENOMEM; 419 419
+1 -1
sound/soc/soc-dapm.c
··· 1435 1435 list_for_each(it, widgets) 1436 1436 size++; 1437 1437 1438 - *list = kzalloc_flex(**list, widgets, size, GFP_KERNEL); 1438 + *list = kzalloc_flex(**list, widgets, size); 1439 1439 if (*list == NULL) 1440 1440 return -ENOMEM; 1441 1441
+1 -1
sound/soc/sof/intel/telemetry.c
··· 71 71 break; 72 72 } 73 73 74 - xoops = kzalloc_flex(*xoops, ar, XTENSA_CORE_AR_REGS_COUNT, GFP_KERNEL); 74 + xoops = kzalloc_flex(*xoops, ar, XTENSA_CORE_AR_REGS_COUNT); 75 75 if (!xoops) 76 76 goto free_block; 77 77
+1 -1
sound/usb/usx2y/usbusx2yaudio.c
··· 657 657 struct urb *urb; 658 658 659 659 if (usx2y->rate != rate) { 660 - us = kzalloc_flex(*us, urb, NOOF_SETRATE_URBS, GFP_KERNEL); 660 + us = kzalloc_flex(*us, urb, NOOF_SETRATE_URBS); 661 661 if (!us) { 662 662 err = -ENOMEM; 663 663 goto cleanup;
+1 -1
sound/virtio/virtio_pcm_msg.c
··· 146 146 int sg_num = virtsnd_pcm_sg_num(data, period_bytes); 147 147 struct virtio_pcm_msg *msg; 148 148 149 - msg = kzalloc_flex(*msg, sgs, sg_num + 2, GFP_KERNEL); 149 + msg = kzalloc_flex(*msg, sgs, sg_num + 2); 150 150 if (!msg) 151 151 return -ENOMEM; 152 152