#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;

#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

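/*
 * Illustrative sketch (not part of this header): mm/memory_hotplug.c
 * stashes one of these types directly in page->lru.next when it
 * registers bootmem pages, roughly as:
 *
 *	page->lru.next = (struct list_head *)((unsigned long)SECTION_INFO);
 *
 * hence the requirement above that the values sit in a distinctive
 * range of unsigned long space.
 */
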
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
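/*
 * Usage sketch (illustrative only): a caller resizing a node's span
 * brackets the update with the resize lock, e.g.
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	... update pgdat->node_start_pfn / pgdat->node_spanned_pages ...
 *	pgdat_resize_unlock(pgdat, &flags);
 */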
/*
 * Zone resizing functions
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
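/*
 * Usage sketch (illustrative only): a reader of a zone's span samples
 * the fields and retries if a resize raced with it:
 *
 *	unsigned seq;
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */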
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* need some defines for these for archs that don't support it */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);

#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
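/*
 * Illustrative sketch (not part of this header): an i386-style
 * arch_add_memory() implementation forwards to __add_pages() roughly as:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_HIGHMEM;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		return __add_pages(nid, zone, start_pfn, nr_pages);
 *	}
 */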

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat must be allocated.
 *
 * If an architecture has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but this depends on the
 * architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only used on the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * With ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate
 * the pgdat.
 * XXX: kmalloc_node() cannot allocate from the new node's memory at this
 * point, because the pgdat for the new node is not itself
 * allocated/initialized yet. Using the new node's own memory will
 * require further work.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);		\
})
/*
 * This definition is only for the error path of node hot-add.
 * For node hot-remove, it will have to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

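/*
 * Illustrative sketch (not part of this header): a node hot-add path
 * pairs the helpers above roughly as follows, freeing the pgdat again
 * if a later step fails:
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *	if (!pgdat)
 *		return -ENOMEM;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	if (error)
 *		arch_free_nodedata(pgdat);
 */
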
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline void put_page_bootmem(struct page *page)
{
}
#else
extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page);
#endif

/*
 * The memory hotplug lock guarantees that 1) all memory hotplug notifier
 * callbacks are invoked under it, and 2) memory offline/online/add/remove
 * operations do not run concurrently.
 */

void lock_memory_hotplug(void);
void unlock_memory_hotplug(void);

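/*
 * Usage sketch (illustrative only): code that must exclude concurrent
 * hotplug operations brackets its critical section with this lock:
 *
 *	lock_memory_hotplug();
 *	... walk or modify hotplug-sensitive state ...
 *	unlock_memory_hotplug();
 */
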
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline void lock_memory_hotplug(void) {}
static inline void unlock_memory_hotplug(void) {}

#endif /* ! CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);

#else
static inline int is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern int mem_online_node(int nid);
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
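/*
 * Usage sketch (illustrative only): a firmware driver that discovers a
 * new memory range typically resolves the node and hands the range to
 * add_memory(); onlining the pages then happens separately:
 *
 *	int nid = memory_add_physaddr_to_nid(start);
 *	int ret = add_memory(nid, start, size);
 *	if (ret)
 *		... handle failure ...
 */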
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
						int nr_pages);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);

#endif /* __LINUX_MEMORY_HOTPLUG_H */