Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'powerpc-6.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Fix a build failure with some versions of ld that have an odd version
string

- Fix incorrect use of mutex in the IMC PMU driver

Thanks to Kajol Jain, Michael Petlan, Ojaswin Mujoo, Peter Zijlstra, and
Yang Yingliang.

* tag 'powerpc-6.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/64s/hash: Make stress_hpt_timer_fn() static
powerpc/imc-pmu: Fix use of mutex in IRQs disabled section
powerpc/boot: Fix incorrect version calculation issue in ld_version

4 files changed: +72 -72
+4
arch/powerpc/boot/wrapper
···
          gsub(".*version ", "");
          gsub("-.*", "");
          split($1,a, ".");
+         if( length(a[3]) == "8" )
+                 # a[3] is probably a date of format yyyymmdd used for release snapshots. We
+                 # can assume it to be zero as it does not signify a new version as such.
+                 a[3] = 0;
          print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
          exit
  }'
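The wrapper packs the dotted ld version into a single integer so that shell code can compare versions numerically. A rough userspace C sketch of that packing (the snapshot version string used below is illustrative, not taken from the patch) shows why an 8-digit yyyymmdd third component had to be zeroed:

    /* Illustrative only: mirrors the awk packing in ld_version(). */
    #include <stdio.h>

    static long long pack(long long major, long long minor, long long patch)
    {
            return major * 100000000LL + minor * 1000000LL + patch * 10000LL;
    }

    int main(void)
    {
            printf("%lld\n", pack(2, 39, 90));       /* "2.39.90"            ->    239900000 */
            printf("%lld\n", pack(2, 39, 20221231)); /* "2.39.20221231", raw -> 202451310000 */
            printf("%lld\n", pack(2, 39, 0));        /* same, date zeroed    ->    239000000 */
            return 0;
    }

Left unpatched, a 2022 snapshot would compare as newer than any plausible future binutils release, defeating the wrapper's minimum-version checks.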
+1 -1
arch/powerpc/include/asm/imc-pmu.h
···
   * are inited.
   */
  struct imc_pmu_ref {
-         struct mutex lock;
+         spinlock_t lock;
          unsigned int id;
          int refc;
  };
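This type change is the heart of the fix: mutex_lock() may sleep, and sleeping is forbidden in the IRQs-disabled paths that take this lock (perf's pmu callbacks and the CPU hotplug handlers seen below), while spin_lock() busy-waits and is safe there. A minimal kernel-style sketch of the corrected pattern, with hypothetical demo_* names rather than the driver's own:

    #include <linux/spinlock.h>

    struct demo_ref {
            spinlock_t lock;        /* was: struct mutex lock */
            unsigned int id;
            int refc;
    };

    /* caller must have run spin_lock_init(&ref->lock) once beforehand */
    static void demo_ref_get(struct demo_ref *ref)
    {
            spin_lock(&ref->lock);  /* spins instead of scheduling away */
            ref->refc++;
            spin_unlock(&ref->lock);
    }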
+1 -1
arch/powerpc/mm/book3s64/hash_utils.c
···
  void hpt_clear_stress(void);
  static struct timer_list stress_hpt_timer;
- void stress_hpt_timer_fn(struct timer_list *timer)
+ static void stress_hpt_timer_fn(struct timer_list *timer)
  {
          int next_cpu;
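stress_hpt_timer_fn() is only ever referenced via timer_setup() inside this file, so giving it static linkage documents that it is file-local and silences "no previous prototype" warnings from W=1 builds. A minimal sketch of the idiom, with hypothetical demo_* names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list demo_timer;

    /* file-local callback: no header prototype is needed once it is static */
    static void demo_timer_fn(struct timer_list *t)
    {
            mod_timer(&demo_timer, jiffies + HZ);   /* re-arm one second out */
    }

    /* at init time: timer_setup(&demo_timer, demo_timer_fn, 0); */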
+66 -70
arch/powerpc/perf/imc-pmu.c
···
  #include <asm/cputhreads.h>
  #include <asm/smp.h>
  #include <linux/string.h>
+ #include <linux/spinlock.h>

  /* Nest IMC data structures and variables */
···
   * Used to avoid races in counting the nest-pmu units during hotplug
   * register and unregister
   */
- static DEFINE_MUTEX(nest_init_lock);
+ static DEFINE_SPINLOCK(nest_init_lock);
  static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
  static struct imc_pmu **per_nest_pmu_arr;
  static cpumask_t nest_imc_cpumask;
···
   * core and trace-imc
   */
  static struct imc_pmu_ref imc_global_refc = {
-         .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+         .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
          .id = 0,
          .refc = 0,
  };
···
                          get_hard_smp_processor_id(cpu));
          /*
           * If this is the last cpu in this chip then, skip the reference
-          * count mutex lock and make the reference count on this chip zero.
+          * count lock and make the reference count on this chip zero.
           */
          ref = get_nest_pmu_ref(cpu);
          if (!ref)
···
          /*
           * See if we need to disable the nest PMU.
           * If no events are currently in use, then we have to take a
-          * mutex to ensure that we don't race with another task doing
+          * lock to ensure that we don't race with another task doing
           * enable or disable the nest counters.
           */
          ref = get_nest_pmu_ref(event->cpu);
          if (!ref)
                  return;

-         /* Take the mutex lock for this node and then decrement the reference count */
-         mutex_lock(&ref->lock);
+         /* Take the lock for this node and then decrement the reference count */
+         spin_lock(&ref->lock);
          if (ref->refc == 0) {
                  /*
                   * The scenario where this is true is, when perf session is
···
                   * an OPAL call to disable the engine in that node.
                   *
                   */
-                 mutex_unlock(&ref->lock);
+                 spin_unlock(&ref->lock);
                  return;
          }
          ref->refc--;
···
          rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                          get_hard_smp_processor_id(event->cpu));
          if (rc) {
-                 mutex_unlock(&ref->lock);
+                 spin_unlock(&ref->lock);
                  pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
                  return;
          }
···
                  WARN(1, "nest-imc: Invalid event reference count\n");
                  ref->refc = 0;
          }
-         mutex_unlock(&ref->lock);
+         spin_unlock(&ref->lock);
  }

  static int nest_imc_event_init(struct perf_event *event)
···
          /*
           * Get the imc_pmu_ref struct for this node.
-          * Take the mutex lock and then increment the count of nest pmu events
-          * inited.
+          * Take the lock and then increment the count of nest pmu events inited.
           */
          ref = get_nest_pmu_ref(event->cpu);
          if (!ref)
                  return -EINVAL;

-         mutex_lock(&ref->lock);
+         spin_lock(&ref->lock);
          if (ref->refc == 0) {
                  rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
                                  get_hard_smp_processor_id(event->cpu));
                  if (rc) {
-                         mutex_unlock(&ref->lock);
+                         spin_unlock(&ref->lock);
                          pr_err("nest-imc: Unable to start the counters for node %d\n",
                                          node_id);
                          return rc;
                  }
          }
          ++ref->refc;
-         mutex_unlock(&ref->lock);
+         spin_unlock(&ref->lock);

          event->destroy = nest_imc_counters_release;
          return 0;
···
                  return -ENOMEM;
          mem_info->vbase = page_address(page);

-         /* Init the mutex */
          core_imc_refc[core_id].id = core_id;
-         mutex_init(&core_imc_refc[core_id].lock);
+         spin_lock_init(&core_imc_refc[core_id].lock);

          rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
                                  __pa((void *)mem_info->vbase),
···
                  perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
          } else {
                  /*
-                  * If this is the last cpu in this core then, skip taking refernce
-                  * count mutex lock for this core and directly zero "refc" for
-                  * this core.
+                  * If this is the last cpu in this core then skip taking reference
+                  * count lock for this core and directly zero "refc" for this core.
                   */
                  opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                                  get_hard_smp_processor_id(cpu));
···
                   * last cpu in this core and core-imc event running
                   * in this cpu.
                   */
-                 mutex_lock(&imc_global_refc.lock);
+                 spin_lock(&imc_global_refc.lock);
                  if (imc_global_refc.id == IMC_DOMAIN_CORE)
                          imc_global_refc.refc--;

-                 mutex_unlock(&imc_global_refc.lock);
+                 spin_unlock(&imc_global_refc.lock);
          }
          return 0;
  }
···
  static void reset_global_refc(struct perf_event *event)
  {
-         mutex_lock(&imc_global_refc.lock);
+         spin_lock(&imc_global_refc.lock);
          imc_global_refc.refc--;

          /*
···
                  imc_global_refc.refc = 0;
                  imc_global_refc.id = 0;
          }
-         mutex_unlock(&imc_global_refc.lock);
+         spin_unlock(&imc_global_refc.lock);
  }

  static void core_imc_counters_release(struct perf_event *event)
···
          /*
           * See if we need to disable the IMC PMU.
           * If no events are currently in use, then we have to take a
-          * mutex to ensure that we don't race with another task doing
+          * lock to ensure that we don't race with another task doing
           * enable or disable the core counters.
           */
          core_id = event->cpu / threads_per_core;

-         /* Take the mutex lock and decrement the refernce count for this core */
+         /* Take the lock and decrement the refernce count for this core */
          ref = &core_imc_refc[core_id];
          if (!ref)
                  return;

-         mutex_lock(&ref->lock);
+         spin_lock(&ref->lock);
          if (ref->refc == 0) {
                  /*
                   * The scenario where this is true is, when perf session is
···
                   * an OPAL call to disable the engine in that core.
                   *
                   */
-                 mutex_unlock(&ref->lock);
+                 spin_unlock(&ref->lock);
                  return;
          }
          ref->refc--;
···
          rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                          get_hard_smp_processor_id(event->cpu));
          if (rc) {
-                 mutex_unlock(&ref->lock);
+                 spin_unlock(&ref->lock);
                  pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
                  return;
          }
···
                  WARN(1, "core-imc: Invalid event reference count\n");
                  ref->refc = 0;
          }
-         mutex_unlock(&ref->lock);
+         spin_unlock(&ref->lock);

          reset_global_refc(event);
  }
···
          if ((!pcmi->vbase))
                  return -ENODEV;

-         /* Get the core_imc mutex for this core */
          ref = &core_imc_refc[core_id];
          if (!ref)
                  return -EINVAL;
···
          /*
           * Core pmu units are enabled only when it is used.
           * See if this is triggered for the first time.
-          * If yes, take the mutex lock and enable the core counters.
+          * If yes, take the lock and enable the core counters.
           * If not, just increment the count in core_imc_refc struct.
           */
-         mutex_lock(&ref->lock);
+         spin_lock(&ref->lock);
          if (ref->refc == 0) {
                  rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                                  get_hard_smp_processor_id(event->cpu));
                  if (rc) {
-                         mutex_unlock(&ref->lock);
+                         spin_unlock(&ref->lock);
                          pr_err("core-imc: Unable to start the counters for core %d\n",
                                          core_id);
                          return rc;
                  }
          }
          ++ref->refc;
-         mutex_unlock(&ref->lock);
+         spin_unlock(&ref->lock);

          /*
           * Since the system can run either in accumulation or trace-mode
···
           * to know whether any other trace/thread imc
           * events are running.
           */
-         mutex_lock(&imc_global_refc.lock);
+         spin_lock(&imc_global_refc.lock);
          if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
                  /*
                   * No other trace/thread imc events are running in
···
                  imc_global_refc.id = IMC_DOMAIN_CORE;
                  imc_global_refc.refc++;
          } else {
-                 mutex_unlock(&imc_global_refc.lock);
+                 spin_unlock(&imc_global_refc.lock);
                  return -EBUSY;
          }
-         mutex_unlock(&imc_global_refc.lock);
+         spin_unlock(&imc_global_refc.lock);

          event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
          event->destroy = core_imc_counters_release;
···
          mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

          /* Reduce the refc if thread-imc event running on this cpu */
-         mutex_lock(&imc_global_refc.lock);
+         spin_lock(&imc_global_refc.lock);
          if (imc_global_refc.id == IMC_DOMAIN_THREAD)
                  imc_global_refc.refc--;
-         mutex_unlock(&imc_global_refc.lock);
+         spin_unlock(&imc_global_refc.lock);

          return 0;
  }
···
          if (!target)
                  return -EINVAL;

-         mutex_lock(&imc_global_refc.lock);
+         spin_lock(&imc_global_refc.lock);
          /*
           * Check if any other trace/core imc events are running in the
           * system, if not set the global id to thread-imc.
···
                  imc_global_refc.id = IMC_DOMAIN_THREAD;
                  imc_global_refc.refc++;
          } else {
-                 mutex_unlock(&imc_global_refc.lock);
+                 spin_unlock(&imc_global_refc.lock);
                  return -EBUSY;
          }
-         mutex_unlock(&imc_global_refc.lock);
+         spin_unlock(&imc_global_refc.lock);

          event->pmu->task_ctx_nr = perf_sw_context;
          event->destroy = reset_global_refc;
···
          /*
           * imc pmus are enabled only when it is used.
           * See if this is triggered for the first time.
-          * If yes, take the mutex lock and enable the counters.
+          * If yes, take the lock and enable the counters.
           * If not, just increment the count in ref count struct.
           */
          ref = &core_imc_refc[core_id];
          if (!ref)
                  return -EINVAL;

-         mutex_lock(&ref->lock);
+         spin_lock(&ref->lock);
          if (ref->refc == 0) {
                  if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                      get_hard_smp_processor_id(smp_processor_id()))) {
-                         mutex_unlock(&ref->lock);
+                         spin_unlock(&ref->lock);
                          pr_err("thread-imc: Unable to start the counter\
                                  for core %d\n", core_id);
                          return -EINVAL;
                  }
          }
          ++ref->refc;
-         mutex_unlock(&ref->lock);
+         spin_unlock(&ref->lock);
          return 0;
  }
···
                  return;
          }

-         mutex_lock(&ref->lock);
+         spin_lock(&ref->lock);
          ref->refc--;
          if (ref->refc == 0) {
                  if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                      get_hard_smp_processor_id(smp_processor_id()))) {
-                         mutex_unlock(&ref->lock);
+                         spin_unlock(&ref->lock);
                          pr_err("thread-imc: Unable to stop the counters\
                                  for core %d\n", core_id);
                          return;
···
          } else if (ref->refc < 0) {
                  ref->refc = 0;
          }
-         mutex_unlock(&ref->lock);
+         spin_unlock(&ref->lock);

          /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
          mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
···
                  }
          }

-         /* Init the mutex, if not already */
          trace_imc_refc[core_id].id = core_id;
-         mutex_init(&trace_imc_refc[core_id].lock);
+         spin_lock_init(&trace_imc_refc[core_id].lock);

          mtspr(SPRN_LDBAR, 0);
          return 0;
···
           * Reduce the refc if any trace-imc event running
           * on this cpu.
           */
-         mutex_lock(&imc_global_refc.lock);
+         spin_lock(&imc_global_refc.lock);
          if (imc_global_refc.id == IMC_DOMAIN_TRACE)
                  imc_global_refc.refc--;
-         mutex_unlock(&imc_global_refc.lock);
+         spin_unlock(&imc_global_refc.lock);

          return 0;
  }
···
          }

          mtspr(SPRN_LDBAR, ldbar_value);
-         mutex_lock(&ref->lock);
+         spin_lock(&ref->lock);
          if (ref->refc == 0) {
                  if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
                      get_hard_smp_processor_id(smp_processor_id()))) {
-                         mutex_unlock(&ref->lock);
+                         spin_unlock(&ref->lock);
                          pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
                          return -EINVAL;
                  }
          }
          ++ref->refc;
-         mutex_unlock(&ref->lock);
+         spin_unlock(&ref->lock);
          return 0;
  }
···
                  return;
          }

-         mutex_lock(&ref->lock);
+         spin_lock(&ref->lock);
          ref->refc--;
          if (ref->refc == 0) {
                  if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
                      get_hard_smp_processor_id(smp_processor_id()))) {
-                         mutex_unlock(&ref->lock);
+                         spin_unlock(&ref->lock);
                          pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
                          return;
                  }
          } else if (ref->refc < 0) {
                  ref->refc = 0;
          }
-         mutex_unlock(&ref->lock);
+         spin_unlock(&ref->lock);

          trace_imc_event_stop(event, flags);
  }
···
           * no other thread is running any core/thread imc
           * events
           */
-         mutex_lock(&imc_global_refc.lock);
+         spin_lock(&imc_global_refc.lock);
          if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
                  /*
                   * No core/thread imc events are running in the
···
                  imc_global_refc.id = IMC_DOMAIN_TRACE;
                  imc_global_refc.refc++;
          } else {
-                 mutex_unlock(&imc_global_refc.lock);
+                 spin_unlock(&imc_global_refc.lock);
                  return -EBUSY;
          }
-         mutex_unlock(&imc_global_refc.lock);
+         spin_unlock(&imc_global_refc.lock);

          event->hw.idx = -1;
···
          i = 0;
          for_each_node(nid) {
                  /*
-                  * Mutex lock to avoid races while tracking the number of
+                  * Take the lock to avoid races while tracking the number of
                   * sessions using the chip's nest pmu units.
                   */
-                 mutex_init(&nest_imc_refc[i].lock);
+                 spin_lock_init(&nest_imc_refc[i].lock);

                  /*
                   * Loop to init the "id" with the node_id. Variable "i" initialized to
···
  static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
  {
          if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
-                 mutex_lock(&nest_init_lock);
+                 spin_lock(&nest_init_lock);
                  if (nest_pmus == 1) {
                          cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
                          kfree(nest_imc_refc);
···
                  if (nest_pmus > 0)
                          nest_pmus--;
-                 mutex_unlock(&nest_init_lock);
+                 spin_unlock(&nest_init_lock);
          }

          /* Free core_imc memory */
···
                   * rest. To handle the cpuhotplug callback unregister, we track
                   * the number of nest pmus in "nest_pmus".
                   */
-                 mutex_lock(&nest_init_lock);
+                 spin_lock(&nest_init_lock);
                  if (nest_pmus == 0) {
                          ret = init_nest_pmu_ref();
                          if (ret) {
-                                 mutex_unlock(&nest_init_lock);
+                                 spin_unlock(&nest_init_lock);
                                  kfree(per_nest_pmu_arr);
                                  per_nest_pmu_arr = NULL;
                                  goto err_free_mem;
···
                          /* Register for cpu hotplug notification. */
                          ret = nest_pmu_cpumask_init();
                          if (ret) {
-                                 mutex_unlock(&nest_init_lock);
+                                 spin_unlock(&nest_init_lock);
                                  kfree(nest_imc_refc);
                                  kfree(per_nest_pmu_arr);
                                  per_nest_pmu_arr = NULL;
···
                          }
                  }
                  nest_pmus++;
-                 mutex_unlock(&nest_init_lock);
+                 spin_unlock(&nest_init_lock);
                  break;
          case IMC_DOMAIN_CORE:
                  ret = core_imc_pmu_cpumask_init();
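Beyond the one-for-one mutex-to-spinlock swaps, the recurring imc_global_refc pattern above is what enforces that only one IMC mode (core, thread, or trace) owns the counters system-wide. A condensed sketch of that claim/release logic, with hypothetical helper names rather than the driver's:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(global_lock);
    static int global_id;           /* 0 means no domain has claimed the PMU */
    static int global_refc;

    static int claim_domain(int domain)
    {
            int ret = 0;

            spin_lock(&global_lock);
            if (global_id == 0 || global_id == domain) {
                    global_id = domain;     /* first claim, or same domain again */
                    global_refc++;
            } else {
                    ret = -EBUSY;           /* another domain owns the counters */
            }
            spin_unlock(&global_lock);
            return ret;
    }

    static void release_domain(void)
    {
            spin_lock(&global_lock);
            if (--global_refc <= 0) {
                    global_refc = 0;
                    global_id = 0;          /* counters free for any domain again */
            }
            spin_unlock(&global_lock);
    }

Because these helpers are called from event-init and hotplug paths that may run with interrupts disabled, the spinlock conversion applies here exactly as it does to the per-core and per-node reference counts.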