Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86-urgent-2020-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 and objtool fixes from Thomas Gleixner:
"A set of fixes for x86 and objtool:

objtool:

- Ignore the double UD2 which is emitted in BUG() when
CONFIG_UBSAN_TRAP is enabled.

- Support clang non-section symbols in objtool ORC dump

- Fix switch table detection in .text.unlikely

- Make the BP scratch register warning more robust.

x86:

- Increase microcode maximum patch size for AMD to cope with new CPUs
which have a larger patch size.

- Fix a crash in the resource control filesystem when the removal of
the default resource group is attempted.

- Preserve Code and Data Prioritization enabled state across CPU
hotplug.

- Update split lock cpu matching to use the new X86_MATCH macros.

- Change the split lock enumeration as Intel finally decided that the
IA32_CORE_CAPABILITIES bits are not architectural contrary to what
the SDM claims. !@#%$^!

- Add Tremont CPU models to the split lock detection cpu match.

- Add a missing static attribute to make sparse happy"

* tag 'x86-urgent-2020-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/split_lock: Add Tremont family CPU models
x86/split_lock: Bits in IA32_CORE_CAPABILITIES are not architectural
x86/resctrl: Preserve CDP enable over CPU hotplug
x86/resctrl: Fix invalid attempt at removing the default resource group
x86/split_lock: Update to use X86_MATCH_INTEL_FAM6_MODEL()
x86/umip: Make umip_insns static
x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
objtool: Make BP scratch register warning more robust
objtool: Fix switch table detection in .text.unlikely
objtool: Support Clang non-section symbols in ORC generation
objtool: Support Clang non-section symbols in ORC dump
objtool: Fix CONFIG_UBSAN_TRAP unreachable warnings

+125 -51
+1 -1
arch/x86/include/asm/microcode_amd.h
··· 41 41 unsigned int mpb[0]; 42 42 }; 43 43 44 - #define PATCH_MAX_SIZE PAGE_SIZE 44 + #define PATCH_MAX_SIZE (3 * PAGE_SIZE) 45 45 46 46 #ifdef CONFIG_MICROCODE_AMD 47 47 extern void __init load_ucode_amd_bsp(unsigned int family);
+36 -18
arch/x86/kernel/cpu/intel.c
··· 1119 1119 sld_update_msr(!(tifn & _TIF_SLD)); 1120 1120 } 1121 1121 1122 - #define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY} 1123 - 1124 1122 /* 1125 - * The following processors have the split lock detection feature. But 1126 - * since they don't have the IA32_CORE_CAPABILITIES MSR, the feature cannot 1127 - * be enumerated. Enable it by family and model matching on these 1128 - * processors. 1123 + * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should 1124 + * only be trusted if it is confirmed that a CPU model implements a 1125 + * specific feature at a particular bit position. 1126 + * 1127 + * The possible driver data field values: 1128 + * 1129 + * - 0: CPU models that are known to have the per-core split-lock detection 1130 + * feature even though they do not enumerate IA32_CORE_CAPABILITIES. 1131 + * 1132 + * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use 1133 + * bit 5 to enumerate the per-core split-lock detection feature. 1129 1134 */ 1130 1135 static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { 1131 - SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_X), 1132 - SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_L), 1136 + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0), 1137 + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0), 1138 + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1), 1139 + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1), 1140 + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1), 1133 1141 {} 1134 1142 }; 1135 1143 1136 1144 void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) 1137 1145 { 1138 - u64 ia32_core_caps = 0; 1146 + const struct x86_cpu_id *m; 1147 + u64 ia32_core_caps; 1139 1148 1140 - if (c->x86_vendor != X86_VENDOR_INTEL) 1149 + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 1141 1150 return; 1142 - if (cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) { 1143 - /* Enumerate features reported in IA32_CORE_CAPABILITIES MSR. 
*/ 1151 + 1152 + m = x86_match_cpu(split_lock_cpu_ids); 1153 + if (!m) 1154 + return; 1155 + 1156 + switch (m->driver_data) { 1157 + case 0: 1158 + break; 1159 + case 1: 1160 + if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) 1161 + return; 1144 1162 rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps); 1145 - } else if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 1146 - /* Enumerate split lock detection by family and model. */ 1147 - if (x86_match_cpu(split_lock_cpu_ids)) 1148 - ia32_core_caps |= MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT; 1163 + if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)) 1164 + return; 1165 + break; 1166 + default: 1167 + return; 1149 1168 } 1150 1169 1151 - if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT) 1152 - split_lock_setup(); 1170 + split_lock_setup(); 1153 1171 }
+2
arch/x86/kernel/cpu/resctrl/core.c
··· 578 578 d->id = id; 579 579 cpumask_set_cpu(cpu, &d->cpu_mask); 580 580 581 + rdt_domain_reconfigure_cdp(r); 582 + 581 583 if (r->alloc_capable && domain_setup_ctrlval(r, d)) { 582 584 kfree(d); 583 585 return;
+1
arch/x86/kernel/cpu/resctrl/internal.h
··· 601 601 void __check_limbo(struct rdt_domain *d, bool force_free); 602 602 bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r); 603 603 bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r); 604 + void rdt_domain_reconfigure_cdp(struct rdt_resource *r); 604 605 605 606 #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
+15 -1
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 1859 1859 return 0; 1860 1860 } 1861 1861 1862 + /* Restore the qos cfg state when a domain comes online */ 1863 + void rdt_domain_reconfigure_cdp(struct rdt_resource *r) 1864 + { 1865 + if (!r->alloc_capable) 1866 + return; 1867 + 1868 + if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA]) 1869 + l2_qos_cfg_update(&r->alloc_enabled); 1870 + 1871 + if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA]) 1872 + l3_qos_cfg_update(&r->alloc_enabled); 1873 + } 1874 + 1862 1875 /* 1863 1876 * Enable or disable the MBA software controller 1864 1877 * which helps user specify bandwidth in MBps. ··· 3085 3072 * If the rdtgroup is a mon group and parent directory 3086 3073 * is a valid "mon_groups" directory, remove the mon group. 3087 3074 */ 3088 - if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) { 3075 + if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && 3076 + rdtgrp != &rdtgroup_default) { 3089 3077 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 3090 3078 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 3091 3079 ret = rdtgroup_ctrl_remove(kn, rdtgrp);
+1 -1
arch/x86/kernel/umip.c
··· 81 81 #define UMIP_INST_SLDT 3 /* 0F 00 /0 */ 82 82 #define UMIP_INST_STR 4 /* 0F 00 /1 */ 83 83 84 - const char * const umip_insns[5] = { 84 + static const char * const umip_insns[5] = { 85 85 [UMIP_INST_SGDT] = "SGDT", 86 86 [UMIP_INST_SIDT] = "SIDT", 87 87 [UMIP_INST_SMSW] = "SMSW",
+18 -8
tools/objtool/check.c
··· 1050 1050 * it. 1051 1051 */ 1052 1052 for (; 1053 - &insn->list != &file->insn_list && 1054 - insn->sec == func->sec && 1055 - insn->offset >= func->offset; 1056 - 1053 + &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func; 1057 1054 insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { 1058 1055 1059 1056 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) ··· 2005 2008 } 2006 2009 2007 2010 if (state->bp_scratch) { 2008 - WARN("%s uses BP as a scratch register", 2009 - func->name); 2011 + WARN_FUNC("BP used as a scratch register", 2012 + insn->sec, insn->offset); 2010 2013 return 1; 2011 2014 } 2012 2015 ··· 2361 2364 !strcmp(insn->sec->name, ".altinstr_aux")) 2362 2365 return true; 2363 2366 2367 + if (!insn->func) 2368 + return false; 2369 + 2370 + /* 2371 + * CONFIG_UBSAN_TRAP inserts a UD2 when it sees 2372 + * __builtin_unreachable(). The BUG() macro has an unreachable() after 2373 + * the UD2, which causes GCC's undefined trap logic to emit another UD2 2374 + * (or occasionally a JMP to UD2). 2375 + */ 2376 + if (list_prev_entry(insn, list)->dead_end && 2377 + (insn->type == INSN_BUG || 2378 + (insn->type == INSN_JUMP_UNCONDITIONAL && 2379 + insn->jump_dest && insn->jump_dest->type == INSN_BUG))) 2380 + return true; 2381 + 2364 2382 /* 2365 2383 * Check if this (or a subsequent) instruction is related to 2366 2384 * CONFIG_UBSAN or CONFIG_KASAN. 2367 2385 * 2368 2386 * End the search at 5 instructions to avoid going into the weeds. 2369 2387 */ 2370 - if (!insn->func) 2371 - return false; 2372 2388 for (i = 0; i < 5; i++) { 2373 2389 2374 2390 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
+25 -15
tools/objtool/orc_dump.c
··· 66 66 char *name; 67 67 size_t nr_sections; 68 68 Elf64_Addr orc_ip_addr = 0; 69 - size_t shstrtab_idx; 69 + size_t shstrtab_idx, strtab_idx = 0; 70 70 Elf *elf; 71 71 Elf_Scn *scn; 72 72 GElf_Shdr sh; ··· 127 127 128 128 if (!strcmp(name, ".symtab")) { 129 129 symtab = data; 130 + } else if (!strcmp(name, ".strtab")) { 131 + strtab_idx = i; 130 132 } else if (!strcmp(name, ".orc_unwind")) { 131 133 orc = data->d_buf; 132 134 orc_size = sh.sh_size; ··· 140 138 } 141 139 } 142 140 143 - if (!symtab || !orc || !orc_ip) 141 + if (!symtab || !strtab_idx || !orc || !orc_ip) 144 142 return 0; 145 143 146 144 if (orc_size % sizeof(*orc) != 0) { ··· 161 159 return -1; 162 160 } 163 161 164 - scn = elf_getscn(elf, sym.st_shndx); 165 - if (!scn) { 166 - WARN_ELF("elf_getscn"); 167 - return -1; 168 - } 162 + if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) { 163 + scn = elf_getscn(elf, sym.st_shndx); 164 + if (!scn) { 165 + WARN_ELF("elf_getscn"); 166 + return -1; 167 + } 169 168 170 - if (!gelf_getshdr(scn, &sh)) { 171 - WARN_ELF("gelf_getshdr"); 172 - return -1; 173 - } 169 + if (!gelf_getshdr(scn, &sh)) { 170 + WARN_ELF("gelf_getshdr"); 171 + return -1; 172 + } 174 173 175 - name = elf_strptr(elf, shstrtab_idx, sh.sh_name); 176 - if (!name || !*name) { 177 - WARN_ELF("elf_strptr"); 178 - return -1; 174 + name = elf_strptr(elf, shstrtab_idx, sh.sh_name); 175 + if (!name) { 176 + WARN_ELF("elf_strptr"); 177 + return -1; 178 + } 179 + } else { 180 + name = elf_strptr(elf, strtab_idx, sym.st_name); 181 + if (!name) { 182 + WARN_ELF("elf_strptr"); 183 + return -1; 184 + } 179 185 } 180 186 181 187 printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
+26 -7
tools/objtool/orc_gen.c
··· 88 88 struct orc_entry *orc; 89 89 struct rela *rela; 90 90 91 - if (!insn_sec->sym) { 92 - WARN("missing symbol for section %s", insn_sec->name); 93 - return -1; 94 - } 95 - 96 91 /* populate ORC data */ 97 92 orc = (struct orc_entry *)u_sec->data->d_buf + idx; 98 93 memcpy(orc, o, sizeof(*orc)); ··· 100 105 } 101 106 memset(rela, 0, sizeof(*rela)); 102 107 103 - rela->sym = insn_sec->sym; 104 - rela->addend = insn_off; 108 + if (insn_sec->sym) { 109 + rela->sym = insn_sec->sym; 110 + rela->addend = insn_off; 111 + } else { 112 + /* 113 + * The Clang assembler doesn't produce section symbols, so we 114 + * have to reference the function symbol instead: 115 + */ 116 + rela->sym = find_symbol_containing(insn_sec, insn_off); 117 + if (!rela->sym) { 118 + /* 119 + * Hack alert. This happens when we need to reference 120 + * the NOP pad insn immediately after the function. 121 + */ 122 + rela->sym = find_symbol_containing(insn_sec, 123 + insn_off - 1); 124 + } 125 + if (!rela->sym) { 126 + WARN("missing symbol for insn at offset 0x%lx\n", 127 + insn_off); 128 + return -1; 129 + } 130 + 131 + rela->addend = insn_off - rela->sym->offset; 132 + } 133 + 105 134 rela->type = R_X86_64_PC32; 106 135 rela->offset = idx * sizeof(int); 107 136 rela->sec = ip_relasec;