[linux.git] / drivers / acpi / hmat / hmat.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>

static u8 hmat_revision;

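/*
 * Targets, initiators and localities are discovered while parsing the SRAT
 * memory affinity entries and the HMAT subtables at boot.
 */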
static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
        WRITE_LATENCY,
        READ_LATENCY,
        WRITE_BANDWIDTH,
        READ_BANDWIDTH,
};

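/*
 * Indexed by enum locality_types; records which parsed HMAT locality
 * structure supplies each attribute.
 */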
static struct memory_locality *localities_types[4];

struct target_cache {
        struct list_head node;
        struct node_cache_attrs cache_attrs;
};

struct memory_target {
        struct list_head node;
        unsigned int memory_pxm;
        unsigned int processor_pxm;
        struct node_hmem_attrs hmem_attrs;
        struct list_head caches;
        struct node_cache_attrs cache_attrs;
        bool registered;
};

struct memory_initiator {
        struct list_head node;
        unsigned int processor_pxm;
};

struct memory_locality {
        struct list_head node;
        struct acpi_hmat_locality *hmat_loc;
};

static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
        struct memory_initiator *initiator;

        list_for_each_entry(initiator, &initiators, node)
                if (initiator->processor_pxm == cpu_pxm)
                        return initiator;
        return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
        struct memory_target *target;

        list_for_each_entry(target, &targets, node)
                if (target->memory_pxm == mem_pxm)
                        return target;
        return NULL;
}

static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
        struct memory_initiator *initiator;

        if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
                return;

        initiator = find_mem_initiator(cpu_pxm);
        if (initiator)
                return;

        initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
        if (!initiator)
                return;

        initiator->processor_pxm = cpu_pxm;
        list_add_tail(&initiator->node, &initiators);
}

static __init void alloc_memory_target(unsigned int mem_pxm)
{
        struct memory_target *target;

        target = find_mem_target(mem_pxm);
        if (target)
                return;

        target = kzalloc(sizeof(*target), GFP_KERNEL);
        if (!target)
                return;

        target->memory_pxm = mem_pxm;
        target->processor_pxm = PXM_INVAL;
        list_add_tail(&target->node, &targets);
        INIT_LIST_HEAD(&target->caches);
}

static __init const char *hmat_data_type(u8 type)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                return "Access Latency";
        case ACPI_HMAT_READ_LATENCY:
                return "Read Latency";
        case ACPI_HMAT_WRITE_LATENCY:
                return "Write Latency";
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                return "Access Bandwidth";
        case ACPI_HMAT_READ_BANDWIDTH:
                return "Read Bandwidth";
        case ACPI_HMAT_WRITE_BANDWIDTH:
                return "Write Bandwidth";
        default:
                return "Reserved";
        }
}

static __init const char *hmat_data_type_suffix(u8 type)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
        case ACPI_HMAT_READ_LATENCY:
        case ACPI_HMAT_WRITE_LATENCY:
                return " nsec";
        case ACPI_HMAT_ACCESS_BANDWIDTH:
        case ACPI_HMAT_READ_BANDWIDTH:
        case ACPI_HMAT_WRITE_BANDWIDTH:
                return " MB/s";
        default:
                return "";
        }
}

static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
        u32 value;

        /*
         * Check for invalid and overflow values
         */
        if (entry == 0xffff || !entry)
                return 0;
        else if (base > (UINT_MAX / (entry)))
                return 0;

        /*
         * Divide by the base unit for version 1, convert latency from
         * picoseconds to nanoseconds if revision 2.
         */
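        /*
         * For example, under a revision 2 table an illustrative read latency
         * entry of 300 with an entry base unit of 10 encodes 3000
         * picoseconds and is reported as 3 nanoseconds.
         */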
        value = entry * base;
        if (hmat_revision == 1) {
                if (value < 10)
                        return 0;
                value = DIV_ROUND_UP(value, 10);
        } else if (hmat_revision == 2) {
                switch (type) {
                case ACPI_HMAT_ACCESS_LATENCY:
                case ACPI_HMAT_READ_LATENCY:
                case ACPI_HMAT_WRITE_LATENCY:
                        value = DIV_ROUND_UP(value, 1000);
                        break;
                default:
                        break;
                }
        }
        return value;
}

static void hmat_update_target_access(struct memory_target *target,
                                      u8 type, u32 value)
{
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                target->hmem_attrs.read_latency = value;
                target->hmem_attrs.write_latency = value;
                break;
        case ACPI_HMAT_READ_LATENCY:
                target->hmem_attrs.read_latency = value;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
                target->hmem_attrs.write_latency = value;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                target->hmem_attrs.read_bandwidth = value;
                target->hmem_attrs.write_bandwidth = value;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
                target->hmem_attrs.read_bandwidth = value;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
                target->hmem_attrs.write_bandwidth = value;
                break;
        default:
                break;
        }
}

static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
        struct memory_locality *loc;

        loc = kzalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc) {
                pr_notice_once("Failed to allocate HMAT locality\n");
                return;
        }

        loc->hmat_loc = hmat_loc;
        list_add_tail(&loc->node, &localities);

        switch (hmat_loc->data_type) {
        case ACPI_HMAT_ACCESS_LATENCY:
                localities_types[READ_LATENCY] = loc;
                localities_types[WRITE_LATENCY] = loc;
                break;
        case ACPI_HMAT_READ_LATENCY:
                localities_types[READ_LATENCY] = loc;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
                localities_types[WRITE_LATENCY] = loc;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
                localities_types[READ_BANDWIDTH] = loc;
                localities_types[WRITE_BANDWIDTH] = loc;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
                localities_types[READ_BANDWIDTH] = loc;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
                localities_types[WRITE_BANDWIDTH] = loc;
                break;
        default:
                break;
        }
}

static __init int hmat_parse_locality(union acpi_subtable_headers *header,
                                      const unsigned long end)
{
        struct acpi_hmat_locality *hmat_loc = (void *)header;
        struct memory_target *target;
        unsigned int init, targ, total_size, ipds, tpds;
        u32 *inits, *targs, value;
        u16 *entries;
        u8 type, mem_hier;

        if (hmat_loc->header.length < sizeof(*hmat_loc)) {
                pr_notice("HMAT: Unexpected locality header length: %d\n",
                         hmat_loc->header.length);
                return -EINVAL;
        }

        type = hmat_loc->data_type;
        mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
        ipds = hmat_loc->number_of_initiator_Pds;
        tpds = hmat_loc->number_of_target_Pds;
        total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
                     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
        if (hmat_loc->header.length < total_size) {
                pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
                         hmat_loc->header.length, total_size);
                return -EINVAL;
        }

        pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
                hmat_loc->flags, hmat_data_type(type), ipds, tpds,
                hmat_loc->entry_base_unit);

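        /*
         * The locality structure is followed by ipds initiator proximity
         * domains, then tpds target proximity domains, then an ipds x tpds
         * matrix of u16 entries in initiator-major order.
         */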
        inits = (u32 *)(hmat_loc + 1);
        targs = inits + ipds;
        entries = (u16 *)(targs + tpds);
        for (init = 0; init < ipds; init++) {
                alloc_memory_initiator(inits[init]);
                for (targ = 0; targ < tpds; targ++) {
                        value = hmat_normalize(entries[init * tpds + targ],
                                               hmat_loc->entry_base_unit,
                                               type);
                        pr_info("  Initiator-Target[%d-%d]:%d%s\n",
                                inits[init], targs[targ], value,
                                hmat_data_type_suffix(type));

                        if (mem_hier == ACPI_HMAT_MEMORY) {
                                target = find_mem_target(targs[targ]);
                                if (target && target->processor_pxm == inits[init])
                                        hmat_update_target_access(target, type, value);
                        }
                }
        }

        if (mem_hier == ACPI_HMAT_MEMORY)
                hmat_add_locality(hmat_loc);

        return 0;
}

static __init int hmat_parse_cache(union acpi_subtable_headers *header,
                                   const unsigned long end)
{
        struct acpi_hmat_cache *cache = (void *)header;
        struct memory_target *target;
        struct target_cache *tcache;
        u32 attrs;

        if (cache->header.length < sizeof(*cache)) {
                pr_notice("HMAT: Unexpected cache header length: %d\n",
                         cache->header.length);
                return -EINVAL;
        }

        attrs = cache->cache_attributes;
        pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
                cache->memory_PD, cache->cache_size, attrs,
                cache->number_of_SMBIOShandles);

        target = find_mem_target(cache->memory_PD);
        if (!target)
                return 0;

        tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
        if (!tcache) {
                pr_notice_once("Failed to allocate HMAT cache info\n");
                return 0;
        }

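        /*
         * cache_attributes packs the cache level, associativity, write
         * policy and line size into bitfields; decode the pieces the node
         * cache sysfs interface reports.
         */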
        tcache->cache_attrs.size = cache->cache_size;
        tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
        tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

        switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
        case ACPI_HMAT_CA_DIRECT_MAPPED:
                tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
                break;
        case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
                tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
                break;
        case ACPI_HMAT_CA_NONE:
        default:
                tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
                break;
        }

        switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
        case ACPI_HMAT_CP_WB:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
                break;
        case ACPI_HMAT_CP_WT:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
                break;
        case ACPI_HMAT_CP_NONE:
        default:
                tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
                break;
        }
        list_add_tail(&tcache->node, &target->caches);

        return 0;
}

static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
                                              const unsigned long end)
{
        struct acpi_hmat_proximity_domain *p = (void *)header;
        struct memory_target *target = NULL;

        if (p->header.length != sizeof(*p)) {
                pr_notice("HMAT: Unexpected address range header length: %d\n",
                         p->header.length);
                return -EINVAL;
        }

        if (hmat_revision == 1)
                pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
                        p->reserved3, p->reserved4, p->flags, p->processor_PD,
                        p->memory_PD);
        else
                pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
                        p->flags, p->processor_PD, p->memory_PD);

        if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
                target = find_mem_target(p->memory_PD);
                if (!target) {
                        pr_debug("HMAT: Memory Domain missing from SRAT\n");
                        return -EINVAL;
                }
        }
        if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
                int p_node = pxm_to_node(p->processor_PD);

                if (p_node == NUMA_NO_NODE) {
                        pr_debug("HMAT: Invalid Processor Domain\n");
                        return -EINVAL;
                }
                target->processor_pxm = p->processor_PD;
        }

        return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
                                      const unsigned long end)
{
        struct acpi_hmat_structure *hdr = (void *)header;

        if (!hdr)
                return -EINVAL;

        switch (hdr->type) {
        case ACPI_HMAT_TYPE_PROXIMITY:
                return hmat_parse_proximity_domain(header, end);
        case ACPI_HMAT_TYPE_LOCALITY:
                return hmat_parse_locality(header, end);
        case ACPI_HMAT_TYPE_CACHE:
                return hmat_parse_cache(header, end);
        default:
                return -EINVAL;
        }
}

static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
                                          const unsigned long end)
{
        struct acpi_srat_mem_affinity *ma = (void *)header;

        if (!ma)
                return -EINVAL;
        if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
                return 0;
        alloc_memory_target(ma->proximity_domain);
        return 0;
}

static u32 hmat_initiator_perf(struct memory_target *target,
                               struct memory_initiator *initiator,
                               struct acpi_hmat_locality *hmat_loc)
{
        unsigned int ipds, tpds, i, idx = 0, tdx = 0;
        u32 *inits, *targs;
        u16 *entries;

        ipds = hmat_loc->number_of_initiator_Pds;
        tpds = hmat_loc->number_of_target_Pds;
        inits = (u32 *)(hmat_loc + 1);
        targs = inits + ipds;
        entries = (u16 *)(targs + tpds);

        for (i = 0; i < ipds; i++) {
                if (inits[i] == initiator->processor_pxm) {
                        idx = i;
                        break;
                }
        }

        if (i == ipds)
                return 0;

        for (i = 0; i < tpds; i++) {
                if (targs[i] == target->memory_pxm) {
                        tdx = i;
                        break;
                }
        }
        if (i == tpds)
                return 0;

        return hmat_normalize(entries[idx * tpds + tdx],
                              hmat_loc->entry_base_unit,
                              hmat_loc->data_type);
}

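/*
 * Lower values win for the latency attributes and higher values win for the
 * bandwidth attributes; a zero value means "unknown" and never wins.
 */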
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
        bool updated = false;

        if (!value)
                return false;

        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
        case ACPI_HMAT_READ_LATENCY:
        case ACPI_HMAT_WRITE_LATENCY:
                if (!*best || *best > value) {
                        *best = value;
                        updated = true;
                }
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
        case ACPI_HMAT_READ_BANDWIDTH:
        case ACPI_HMAT_WRITE_BANDWIDTH:
                if (!*best || *best < value) {
                        *best = value;
                        updated = true;
                }
                break;
        }

        return updated;
}

static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct memory_initiator *ia;
        struct memory_initiator *ib;
        unsigned long *p_nodes = priv;

        ia = list_entry(a, struct memory_initiator, node);
        ib = list_entry(b, struct memory_initiator, node);

        set_bit(ia->processor_pxm, p_nodes);
        set_bit(ib->processor_pxm, p_nodes);

        return ia->processor_pxm - ib->processor_pxm;
}

static void hmat_register_target_initiators(struct memory_target *target)
{
        static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
        struct memory_initiator *initiator;
        unsigned int mem_nid, cpu_nid;
        struct memory_locality *loc = NULL;
        u32 best = 0;
        int i;

        mem_nid = pxm_to_node(target->memory_pxm);
        /*
         * If the Address Range Structure provides a local processor pxm, link
         * only that one. Otherwise, find the best performance attributes and
         * register all initiators that match.
         */
        if (target->processor_pxm != PXM_INVAL) {
                cpu_nid = pxm_to_node(target->processor_pxm);
                register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
                return;
        }

        if (list_empty(&localities))
                return;

        /*
         * We need the initiator list sorted so we can use bitmap_clear for
         * previously set initiators when we find a better memory accessor.
         * We'll also use the sorting to prime the candidate nodes with known
         * initiators.
         */
        bitmap_zero(p_nodes, MAX_NUMNODES);
        list_sort(p_nodes, &initiators, initiator_cmp);
        for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
                loc = localities_types[i];
                if (!loc)
                        continue;

                best = 0;
                list_for_each_entry(initiator, &initiators, node) {
                        u32 value;

                        if (!test_bit(initiator->processor_pxm, p_nodes))
                                continue;

                        value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
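                        /*
                         * A strictly better value clears all lower-numbered
                         * initiators kept so far; any initiator that does not
                         * match the current best is then dropped individually.
                         */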
                        if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
                                bitmap_clear(p_nodes, 0, initiator->processor_pxm);
                        if (value != best)
                                clear_bit(initiator->processor_pxm, p_nodes);
                }
                if (best)
                        hmat_update_target_access(target, loc->hmat_loc->data_type, best);
        }

        for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
                cpu_nid = pxm_to_node(i);
                register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
        }
}

static void hmat_register_target_cache(struct memory_target *target)
{
        unsigned mem_nid = pxm_to_node(target->memory_pxm);
        struct target_cache *tcache;

        list_for_each_entry(tcache, &target->caches, node)
                node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target)
{
        unsigned mem_nid = pxm_to_node(target->memory_pxm);
        node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}

static void hmat_register_target(struct memory_target *target)
{
        int nid = pxm_to_node(target->memory_pxm);

        /*
         * Skip offline nodes. This can happen when memory
         * marked EFI_MEMORY_SP, "specific purpose", is applied
         * to all the memory in a proximity domain leading to
         * the node being marked offline / unplugged, or if a
         * memory-only "hotplug" node is offline.
         */
        if (nid == NUMA_NO_NODE || !node_online(nid))
                return;

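        /*
         * target_lock guards the registered flag so a target is registered
         * at most once, whether this is reached from the initcall or from
         * the memory hotplug notifier.
         */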
        mutex_lock(&target_lock);
        if (!target->registered) {
                hmat_register_target_initiators(target);
                hmat_register_target_cache(target);
                hmat_register_target_perf(target);
                target->registered = true;
        }
        mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
        struct memory_target *target;

        list_for_each_entry(target, &targets, node)
                hmat_register_target(target);
}

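/*
 * Memory hotplug notifier: when a node comes online, register the HMAT
 * attributes for its target if that has not been done already.
 */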
static int hmat_callback(struct notifier_block *self,
                         unsigned long action, void *arg)
{
        struct memory_target *target;
        struct memory_notify *mnb = arg;
        int pxm, nid = mnb->status_change_nid;

        if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
                return NOTIFY_OK;

        pxm = node_to_pxm(nid);
        target = find_mem_target(pxm);
        if (!target)
                return NOTIFY_OK;

        hmat_register_target(target);
        return NOTIFY_OK;
}

static struct notifier_block hmat_callback_nb = {
        .notifier_call = hmat_callback,
        .priority = 2,
};

static __init void hmat_free_structures(void)
{
        struct memory_target *target, *tnext;
        struct memory_locality *loc, *lnext;
        struct memory_initiator *initiator, *inext;
        struct target_cache *tcache, *cnext;

        list_for_each_entry_safe(target, tnext, &targets, node) {
                list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
                        list_del(&tcache->node);
                        kfree(tcache);
                }
                list_del(&target->node);
                kfree(target);
        }

        list_for_each_entry_safe(initiator, inext, &initiators, node) {
                list_del(&initiator->node);
                kfree(initiator);
        }

        list_for_each_entry_safe(loc, lnext, &localities, node) {
                list_del(&loc->node);
                kfree(loc);
        }
}

static __init int hmat_init(void)
{
        struct acpi_table_header *tbl;
        enum acpi_hmat_type i;
        acpi_status status;

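        /*
         * Parse the SRAT memory affinity entries first to discover the
         * memory targets, then walk every HMAT subtable and finally register
         * the collected attributes with the node subsystem.
         */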
        if (srat_disabled())
                return 0;

        status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
        if (ACPI_FAILURE(status))
                return 0;

        if (acpi_table_parse_entries(ACPI_SIG_SRAT,
                                sizeof(struct acpi_table_srat),
                                ACPI_SRAT_TYPE_MEMORY_AFFINITY,
                                srat_parse_mem_affinity, 0) < 0)
                goto out_put;
        acpi_put_table(tbl);

        status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
        if (ACPI_FAILURE(status))
                goto out_put;

        hmat_revision = tbl->revision;
        switch (hmat_revision) {
        case 1:
        case 2:
                break;
        default:
                pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
                goto out_put;
        }

        for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
                if (acpi_table_parse_entries(ACPI_SIG_HMAT,
                                             sizeof(struct acpi_table_hmat), i,
                                             hmat_parse_subtable, 0) < 0) {
                        pr_notice("Ignoring HMAT: Invalid table\n");
                        goto out_put;
                }
        }
        hmat_register_targets();

        /* Keep the table and structures if the notifier may use them */
        if (!register_hotmemory_notifier(&hmat_callback_nb))
                return 0;
out_put:
        hmat_free_structures();
        acpi_put_table(tbl);
        return 0;
}
subsys_initcall(hmat_init);