Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/mempolicy.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/memblock.h>

static const struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

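/*
 * Binary "cpumap"/"cpulist" sysfs read handlers: report the node's online
 * CPUs as a bitmask or as a CPU list.
 */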
static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
				  const struct bin_attribute *attr, char *buf,
				  loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static const BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);

static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
				   const struct bin_attribute *attr, char *buf,
				   loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_list_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static const BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);

/**
 * struct node_access_nodes - Access class device to hold user visible
 * relationships to other nodes.
 * @dev: Device for this memory access class
 * @list_node: List element in the node's access list
 * @access: The access class rank
 * @coord: Heterogeneous memory performance coordinates
 */
struct node_access_nodes {
	struct device dev;
	struct list_head list_node;
	unsigned int access;
#ifdef CONFIG_HMEM_REPORTING
	struct access_coordinate coord;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name = "initiators",
	.attrs = node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name = "targets",
	.attrs = node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

#ifdef CONFIG_MEMORY_HOTPLUG
static BLOCKING_NOTIFIER_HEAD(node_chain);

int register_node_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&node_chain, nb);
}
EXPORT_SYMBOL(register_node_notifier);

void unregister_node_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&node_chain, nb);
}
EXPORT_SYMBOL(unregister_node_notifier);

int node_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&node_chain, val, v);
}
#endif

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

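/*
 * Find the access-class device for @access on @node, or create and
 * register it on first use.
 */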
static struct node_access_nodes *node_init_node_access(struct node *node,
							enum access_coordinate_class access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc_obj(*access_node);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(property)						\
static ssize_t property##_show(struct device *dev,			\
			       struct device_attribute *attr,		\
			       char *buf)				\
{									\
	return sysfs_emit(buf, "%u\n",					\
			  to_access_nodes(dev)->coord.property);	\
}									\
static DEVICE_ATTR_RO(property)

ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @coord: Heterogeneous memory performance coordinates
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
			 enum access_coordinate_class access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->coord = *coord;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}

	/* When setting CPU access coordinates, update mempolicy */
	if (access == ACCESS_COORDINATE_CPU) {
		if (mempolicy_set_node_perf(nid, coord)) {
			pr_info("failed to set mempolicy attrs for node %d\n",
				nid);
		}
	}
}
EXPORT_SYMBOL_GPL(node_set_perf_attrs);

/**
 * node_update_perf_attrs - Update the performance values for given access class
 * @nid: Node identifier to be updated
 * @coord: Heterogeneous memory performance coordinates
 * @access: The access class for the given attributes
 */
void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
			    enum access_coordinate_class access)
{
	struct node_access_nodes *access_node;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	list_for_each_entry(access_node, &node->access_list, list_node) {
		if (access_node->access != access)
			continue;

		access_node->coord = *coord;
		for (i = 0; access_attrs[i]; i++) {
			sysfs_notify(&access_node->dev.kobj,
				     NULL, access_attrs[i]->name);
		}
		break;
	}

	/* When setting CPU access coordinates, update mempolicy */
	if (access != ACCESS_COORDINATE_CPU)
		return;

	if (mempolicy_set_node_perf(nid, coord))
		pr_info("failed to set mempolicy attrs for node %d\n", nid);
}
EXPORT_SYMBOL_GPL(node_update_perf_attrs);

/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev: Device representing the cache level
 * @node: List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, fmt "\n",				\
			  to_cache_info(dev)->cache_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")
CACHE_ATTR(address_mode, "%#x")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_address_mode.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);

	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc_obj(*dev);
	if (!dev)
		return;

	device_initialize(dev);
	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto put_device;

	if (device_add(dev))
		goto put_device;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
put_device:
	put_device(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				 "attempt to add duplicate cache level:%d\n",
				 cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc_obj(*info);
	if (!info)
		return;

	dev = &info->dev;
	device_initialize(dev);
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto put_device;

	info->cache_attrs = *cache_attrs;
	if (device_add(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto put_device;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
put_device:
	put_device(dev);
}

static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

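/* K(x): convert a number of pages into kilobytes */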
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int len = 0;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;
	unsigned long swapcached = 0;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
#endif
	len = sysfs_emit_at(buf, len,
			    "Node %d MemTotal: %8lu kB\n"
			    "Node %d MemFree: %8lu kB\n"
			    "Node %d MemUsed: %8lu kB\n"
			    "Node %d SwapCached: %8lu kB\n"
			    "Node %d Active: %8lu kB\n"
			    "Node %d Inactive: %8lu kB\n"
			    "Node %d Active(anon): %8lu kB\n"
			    "Node %d Inactive(anon): %8lu kB\n"
			    "Node %d Active(file): %8lu kB\n"
			    "Node %d Inactive(file): %8lu kB\n"
			    "Node %d Unevictable: %8lu kB\n"
			    "Node %d Mlocked: %8lu kB\n",
			    nid, K(i.totalram),
			    nid, K(i.freeram),
			    nid, K(i.totalram - i.freeram),
			    nid, K(swapcached),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				   node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				   node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	len += sysfs_emit_at(buf, len,
			     "Node %d HighTotal: %8lu kB\n"
			     "Node %d HighFree: %8lu kB\n"
			     "Node %d LowTotal: %8lu kB\n"
			     "Node %d LowFree: %8lu kB\n",
			     nid, K(i.totalhigh),
			     nid, K(i.freehigh),
			     nid, K(i.totalram - i.totalhigh),
			     nid, K(i.freeram - i.freehigh));
#endif
	len += sysfs_emit_at(buf, len,
			     "Node %d Dirty: %8lu kB\n"
			     "Node %d Writeback: %8lu kB\n"
			     "Node %d FilePages: %8lu kB\n"
			     "Node %d Mapped: %8lu kB\n"
			     "Node %d AnonPages: %8lu kB\n"
			     "Node %d Shmem: %8lu kB\n"
			     "Node %d KernelStack: %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
			     "Node %d ShadowCallStack:%8lu kB\n"
#endif
			     "Node %d PageTables: %8lu kB\n"
			     "Node %d SecPageTables: %8lu kB\n"
			     "Node %d NFS_Unstable: %8lu kB\n"
			     "Node %d Bounce: %8lu kB\n"
			     "Node %d WritebackTmp: %8lu kB\n"
			     "Node %d KReclaimable: %8lu kB\n"
			     "Node %d Slab: %8lu kB\n"
			     "Node %d SReclaimable: %8lu kB\n"
			     "Node %d SUnreclaim: %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     "Node %d AnonHugePages: %8lu kB\n"
			     "Node %d ShmemHugePages: %8lu kB\n"
			     "Node %d ShmemPmdMapped: %8lu kB\n"
			     "Node %d FileHugePages: %8lu kB\n"
			     "Node %d FilePmdMapped: %8lu kB\n"
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
			     "Node %d Unaccepted: %8lu kB\n"
#endif
			     "Node %d GPUActive: %8lu kB\n"
			     "Node %d GPUReclaim: %8lu kB\n"
			     ,
			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
			     nid, K(i.sharedram),
			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			     nid, 0UL,
			     nid, 0UL,
			     nid, 0UL,
			     nid, K(sreclaimable +
				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
			     nid, K(sreclaimable + sunreclaimable),
			     nid, K(sreclaimable),
			     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     ,
			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
			     ,
			     nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED))
#endif
			     ,
			     nid, K(node_page_state(pgdat, NR_GPU_ACTIVE)),
			     nid, K(node_page_state(pgdat, NR_GPU_RECLAIM))
			     );
	len += hugetlb_report_node_meminfo(buf, len, nid);
	return len;
}

#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);

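/* Per-node NUMA hit/miss event counters, exported via the "numastat" attribute. */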
static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	fold_vm_numa_events();
	return sysfs_emit(buf,
			  "numa_hit %lu\n"
			  "numa_miss %lu\n"
			  "numa_foreign %lu\n"
			  "interleave_hit %lu\n"
			  "local_node %lu\n"
			  "other_node %lu\n",
			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int len = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     zone_stat_name(i),
				     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	fold_vm_numa_events();
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     numa_stat_name(i),
				     sum_zone_numa_event_state(nid, i));

#endif
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		unsigned long pages = node_page_state_pages(pgdat, i);

		if (vmstat_item_print_in_thp(i))
			pages /= HPAGE_PMD_NR;
		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
				     pages);
	}

	return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i) {
		len += sysfs_emit_at(buf, len, "%s%d",
				     i ? " " : "", node_distance(nid, i));
	}

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);

static struct attribute *node_dev_attrs[] = {
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};

static const struct bin_attribute *node_dev_bin_attrs[] = {
	&bin_attr_cpumap,
	&bin_attr_cpulist,
	NULL
};

static const struct attribute_group node_dev_group = {
	.attrs = node_dev_attrs,
	.bin_attrs = node_dev_bin_attrs,
};

static const struct attribute_group *node_dev_groups[] = {
	&node_dev_group,
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
	&arch_node_dev_group,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	&memory_failure_attr_group,
#endif
	NULL
};

static void node_device_release(struct device *dev)
{
	kfree(to_node(dev));
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}

/**
 * register_memory_node_under_compute_node - link memory node to its compute
 * node for a given access class.
 * @mem_nid: Memory node number
 * @cpu_nid: Cpu node number
 * @access: Access class to register
 *
 * Description:
 * For use with platforms that may have separate memory and compute nodes.
 * This function will export node relationships linking which memory
 * initiator nodes can access memory targets at a given ranked access
 * class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    enum access_coordinate_class access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
 err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
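/*
 * Create the pair of sysfs links between a memory block device and the
 * node device it belongs to.
 */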
static void do_register_memory_block_under_node(int nid,
						struct memory_block *mem_blk)
{
	int ret;

	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&node_devices[nid]->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&mem_blk->dev.kobj), ret);

	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				       &node_devices[nid]->dev.kobj,
				       kobject_name(&node_devices[nid]->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&mem_blk->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&node_devices[nid]->dev.kobj),
				    ret);
}

/*
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	do_register_memory_block_under_node(nid, mem_blk);
	return 0;
}

/*
 * Unregister a memory block device under the node it spans. Memory blocks
 * with multiple nodes cannot be offlined and therefore also never be removed.
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	if (mem_blk->nid == NUMA_NO_NODE)
		return;

	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
			  kobject_name(&mem_blk->dev.kobj));
	sysfs_remove_link(&mem_blk->dev.kobj,
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

/* register all memory blocks under the corresponding nodes */
static void register_memory_blocks_under_nodes(void)
{
	struct memblock_region *r;

	for_each_mem_region(r) {
		const unsigned long start_block_id = phys_to_block_id(r->base);
		const unsigned long end_block_id = phys_to_block_id(r->base + r->size - 1);
		const int nid = memblock_get_region_node(r);
		unsigned long block_id;

		if (!node_online(nid))
			continue;

		for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
			struct memory_block *mem;

			mem = find_memory_block_by_id(block_id);
			if (!mem)
				continue;

			memory_block_add_nid_early(mem, nid);
			do_register_memory_block_under_node(nid, mem);
			put_device(&mem->dev);
		}
	}
}

void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
					       unsigned long end_pfn)
{
	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			   (void *)&nid, register_mem_block_under_node_hotplug);
	return;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/**
 * register_node - Initialize and register the node device.
 * @nid: Node number to use when creating the device.
 *
 * Return: 0 on success, -errno otherwise
 */
int register_node(int nid)
{
	int error;
	int cpu;
	struct node *node;

	node = kzalloc_obj(struct node);
	if (!node)
		return -ENOMEM;

	INIT_LIST_HEAD(&node->access_list);

	node->dev.id = nid;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;

	error = device_register(&node->dev);
	if (error) {
		put_device(&node->dev);
		return error;
	}

	node_devices[nid] = node;
	hugetlb_register_node(node);
	compaction_register_node(node);
	reclaim_register_node(node);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	node_init_caches(nid);

	return error;
}

/**
 * unregister_node - unregister a node device
 * @nid: nid of the node going away
 *
 * Unregisters the node device at node id @nid. All the devices on the
 * node must be unregistered before calling this function.
 */
void unregister_node(int nid)
{
	struct node *node = node_devices[nid];

	if (!node)
		return;

	hugetlb_unregister_node(node);
	compaction_unregister_node(node);
	reclaim_unregister_node(node);
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

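/* Print the nodemask for a node state, e.g. /sys/devices/system/node/has_cpu */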
static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return sysfs_emit(buf, "%*pbl\n",
			  nodemask_pr_args(&node_states[na->state]));
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
					   N_GENERIC_INITIATOR),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

void __init node_dev_init(void)
{
	int ret, i;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs) - 1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create all node devices, which will properly link the node
	 * to already created cpu devices.
	 */
	for_each_online_node(i) {
		ret = register_node(i);
		if (ret)
			panic("%s() failed to add node: %d\n", __func__, ret);
	}

	register_memory_blocks_under_nodes();
}