mm/sparse: use the new sparse buffer functions in non-vmemmap
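This blobdiff collects several related sparsemem boot-path changes: the
non-vmemmap path now obtains section memmaps from the new
sparse_buffer_init()/sparse_buffer_alloc()/sparse_buffer_fini() helpers
instead of an open-coded bulk memblock allocation with a per-section
fallback; usemap_map and map_map are sized by nr_present_sections rather
than NR_MEM_SECTIONS and are filled densely via nr_consumed_maps;
clearing of ->section_mem_map for sections whose allocations failed is
consolidated into the final loop of sparse_init(); and
sparse_init_one_section() becomes void, since both remaining callers
mark the section present before calling it.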
diff --git a/mm/sparse.c b/mm/sparse.c
index f13f2723950ad4c06502c15df16cd8f7acb8e4c3..db4867b62fff67c36372944ca7a29f94f5634ca9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -200,6 +200,12 @@ static inline int next_present_section_nr(int section_nr)
              (section_nr <= __highest_present_section_nr));    \
             section_nr = next_present_section_nr(section_nr))
 
+/*
+ * Record how many memory sections are marked as present
+ * during system bootup.
+ */
+static int __initdata nr_present_sections;
+
 /* Record a memory area against a node. */
 void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
@@ -229,6 +235,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_IS_ONLINE;
                        section_mark_present(ms);
+                       nr_present_sections++;
                }
        }
 }
@@ -257,19 +264,14 @@ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pn
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
 
-static int __meminit sparse_init_one_section(struct mem_section *ms,
+static void __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
 {
-       if (!present_section(ms))
-               return -EINVAL;
-
        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;
-
-       return 1;
 }
 
 unsigned long usemap_size(void)
@@ -379,6 +381,7 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
        unsigned long pnum;
        unsigned long **usemap_map = (unsigned long **)data;
        int size = usemap_size();
+       int nr_consumed_maps = 0;
 
        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          size * usemap_count);
@@ -390,20 +393,35 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
-               usemap_map[pnum] = usemap;
+               usemap_map[nr_consumed_maps] = usemap;
                usemap += size;
-               check_usemap_section_nr(nodeid, usemap_map[pnum]);
+               check_usemap_section_nr(nodeid, usemap_map[nr_consumed_maps]);
+               nr_consumed_maps++;
        }
 }
 
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+unsigned long __init section_map_size(void)
+{
+       return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
+}
+
+#else
+unsigned long __init section_map_size(void)
+{
+       return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
+}
+
 struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
                struct vmem_altmap *altmap)
 {
-       struct page *map;
-       unsigned long size;
+       unsigned long size = section_map_size();
+       struct page *map = sparse_buffer_alloc(size);
+
+       if (map)
+               return map;
 
-       size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
        map = memblock_virt_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nid);
@@ -414,41 +432,61 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
 {
-       void *map;
        unsigned long pnum;
-       unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
-
-       size = PAGE_ALIGN(size);
-       map = memblock_virt_alloc_try_nid_raw(size * map_count,
-                                             PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
-                                             BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
-       if (map) {
-               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-                       if (!present_section_nr(pnum))
-                               continue;
-                       map_map[pnum] = map;
-                       map += size;
-               }
-               return;
-       }
+       unsigned long size = section_map_size();
+       int nr_consumed_maps = 0;
 
-       /* fallback */
+       sparse_buffer_init(size * map_count, nodeid);
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-               struct mem_section *ms;
-
                if (!present_section_nr(pnum))
                        continue;
-               map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
-               if (map_map[pnum])
+               map_map[nr_consumed_maps] =
+                               sparse_mem_map_populate(pnum, nodeid, NULL);
+               if (map_map[nr_consumed_maps++])
                        continue;
-               ms = __nr_to_section(pnum);
                pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
                       __func__);
-               ms->section_mem_map = 0;
        }
+       sparse_buffer_fini();
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+static void *sparsemap_buf __meminitdata;
+static void *sparsemap_buf_end __meminitdata;
+
+void __init sparse_buffer_init(unsigned long size, int nid)
+{
+       WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
+       sparsemap_buf =
+               memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE,
+                                               __pa(MAX_DMA_ADDRESS),
+                                               BOOTMEM_ALLOC_ACCESSIBLE, nid);
+       sparsemap_buf_end = sparsemap_buf + size;
+}
+
+void __init sparse_buffer_fini(void)
+{
+       unsigned long size = sparsemap_buf_end - sparsemap_buf;
+
+       if (sparsemap_buf && size > 0)
+               memblock_free_early(__pa(sparsemap_buf), size);
+       sparsemap_buf = NULL;
+}
+
+void * __meminit sparse_buffer_alloc(unsigned long size)
+{
+       void *ptr = NULL;
+
+       if (sparsemap_buf) {
+               ptr = PTR_ALIGN(sparsemap_buf, size);
+               if (ptr + size > sparsemap_buf_end)
+                       ptr = NULL;
+               else
+                       sparsemap_buf = ptr + size;
+       }
+       return ptr;
+}
+
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 static void __init sparse_early_mem_maps_alloc_node(void *data,
                                 unsigned long pnum_begin,
@@ -472,7 +510,6 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 
        pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
               __func__);
-       ms->section_mem_map = 0;
        return NULL;
 }
 #endif
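
Note: the two ms->section_mem_map = 0; assignments removed above (from
sparse_mem_maps_populate_node() and sparse_early_mem_map_alloc()) are not
dropped silently. The final loop of sparse_init(), in the hunks below, now
clears ->section_mem_map for every section whose memmap or usemap
allocation failed, which keeps the dense nr_consumed_maps indexing
consistent.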
@@ -484,10 +521,12 @@ void __weak __meminit vmemmap_populate_print_last(void)
 /**
 *  alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 *  @map: usemap_map for pageblock flags or map_map for vmemmap
+ *  @data_unit_size: the size of each map entry
  */
 static void __init alloc_usemap_and_memmap(void (*alloc_func)
                                        (void *, unsigned long, unsigned long,
-                                       unsigned long, int), void *data)
+                                       unsigned long, int), void *data,
+                                       int data_unit_size)
 {
        unsigned long pnum;
        unsigned long map_count;
@@ -519,6 +558,7 @@ static void __init alloc_usemap_and_memmap(void (*alloc_func)
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
+               data += map_count * data_unit_size;
                map_count = 1;
        }
        /* ok, last chunk */
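
The new data += map_count * data_unit_size line exists because the
per-node callbacks now fill their output array densely, in visit order,
instead of indexing it by section number. A sketch with invented section
numbers (illustration only, not kernel code):

	/*
	 * Suppose node 0 has present sections {4, 7, 9} and node 1 has
	 * {21, 30}. The output array is packed in visit order:
	 *
	 *   usemap_map[0..2]  ->  node 0, pnum 4, 7, 9
	 *   usemap_map[3..4]  ->  node 1, pnum 21, 30
	 *
	 * When the walk reaches node 1, map_count still holds node 0's
	 * count (3), so advancing data by 3 * data_unit_size hands the
	 * next callback a cursor that starts at slot 3.
	 */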
@@ -537,6 +577,7 @@ void __init sparse_init(void)
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
+       int nr_consumed_maps = 0;
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        int size2;
        struct page **map_map;
@@ -559,37 +600,64 @@ void __init sparse_init(void)
         * powerpc needs to call sparse_init_one_section right after each
         * sparse_early_mem_map_alloc, so allocate usemap_map at first.
         */
-       size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
+       size = sizeof(unsigned long *) * nr_present_sections;
        usemap_map = memblock_virt_alloc(size, 0);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");
        alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
-                                                       (void *)usemap_map);
+                               (void *)usemap_map,
+                               sizeof(usemap_map[0]));
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-       size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+       size2 = sizeof(struct page *) * nr_present_sections;
        map_map = memblock_virt_alloc(size2, 0);
        if (!map_map)
                panic("can not allocate map_map\n");
        alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
-                                                       (void *)map_map);
+                               (void *)map_map,
+                               sizeof(map_map[0]));
 #endif
 
+       /*
+        * The number of present sections stored in nr_present_sections
+        * does not change here, since the sections were all marked as
+        * present in memory_present(). In this for loop we need to check
+        * which sections failed to allocate a memmap or usemap, and clear
+        * their ->section_mem_map accordingly. During this process,
+        * 'nr_consumed_maps' must be incremented whether the allocations
+        * succeeded or failed, so that after handling the i-th memory
+        * section we can correctly look up the memmap and usemap of the
+        * (i+1)-th section.
+        */
        for_each_present_section_nr(0, pnum) {
-               usemap = usemap_map[pnum];
-               if (!usemap)
+               struct mem_section *ms;
+
+               if (nr_consumed_maps >= nr_present_sections) {
+                       pr_err("nr_consumed_maps goes beyond nr_present_sections\n");
+                       break;
+               }
+               ms = __nr_to_section(pnum);
+               usemap = usemap_map[nr_consumed_maps];
+               if (!usemap) {
+                       ms->section_mem_map = 0;
+                       nr_consumed_maps++;
                        continue;
+               }
 
 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-               map = map_map[pnum];
+               map = map_map[nr_consumed_maps];
 #else
                map = sparse_early_mem_map_alloc(pnum);
 #endif
-               if (!map)
+               if (!map) {
+                       ms->section_mem_map = 0;
+                       nr_consumed_maps++;
                        continue;
+               }
 
                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
+               nr_consumed_maps++;
        }
 
        vmemmap_populate_print_last();
@@ -760,6 +828,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
+       ret = 0;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
        if (!memmap)
                return -ENOMEM;
@@ -786,12 +855,11 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 #endif
 
        section_mark_present(ms);
-
-       ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
+       sparse_init_one_section(ms, section_nr, memmap, usemap);
 
 out:
        pgdat_resize_unlock(pgdat, &flags);
-       if (ret <= 0) {
+       if (ret < 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, altmap);
        }
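
For readers who want to experiment with the allocator's behavior, below is
a minimal userspace model of the sparsemap_buf bump allocator added in this
diff. It is a sketch, not kernel code: the function names mirror the diff,
but malloc()/free() stand in for memblock_virt_alloc_try_nid_raw() and
memblock_free_early(), and PTR_ALIGN() is re-implemented locally. Like the
kernel macro, this PTR_ALIGN() only works for power-of-two alignments. One
observable property of the real code is kept: the cursor jumps forward to
the aligned address, so bytes skipped for alignment are consumed from the
buffer (sparse_buffer_fini() in the kernel returns only the unused tail).

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's PTR_ALIGN(); power-of-two 'a' only. */
#define PTR_ALIGN(p, a) \
	((char *)((((uintptr_t)(p)) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1)))

static char *sparsemap_buf;
static char *sparsemap_buf_end;
static char *sparsemap_buf_start;	/* demo only, so we can free() it all */

static void sparse_buffer_init(unsigned long size)
{
	sparsemap_buf = malloc(size);	/* models memblock_virt_alloc_try_nid_raw() */
	sparsemap_buf_start = sparsemap_buf;
	sparsemap_buf_end = sparsemap_buf ? sparsemap_buf + size : NULL;
}

static void sparse_buffer_fini(void)
{
	/* Demo deviation: the kernel frees only the tail past the cursor. */
	free(sparsemap_buf_start);
	sparsemap_buf = sparsemap_buf_start = sparsemap_buf_end = NULL;
}

static void *sparse_buffer_alloc(unsigned long size)
{
	char *ptr = NULL;

	if (sparsemap_buf) {
		ptr = PTR_ALIGN(sparsemap_buf, size);	/* align to the request size */
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;			/* exhausted: caller falls back */
		else
			sparsemap_buf = ptr + size;	/* bump the cursor */
	}
	return ptr;
}

int main(void)
{
	sparse_buffer_init(4096);
	char *a = sparse_buffer_alloc(512);
	char *b = sparse_buffer_alloc(512);

	/* Both requests are 512-byte aligned; b lands exactly 512 past a. */
	printf("a=%p b=%p delta=%td\n", (void *)a, (void *)b, b - a);
	sparse_buffer_fini();
	return 0;
}

Built with cc and run, the two 512-byte requests come back 512-byte
aligned and exactly 512 bytes apart, matching the carving that
sparse_mem_map_populate() performs per present section in the diff above.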