/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/tmem.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif
#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */
#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#endif /* CONFIG_FRONTSWAP */
#ifdef CONFIG_XEN_SELFBALLOONING
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */
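
/*
 * Usage note (illustrative, not part of the original file): when built in,
 * tmem is activated by the "tmem" kernel command-line option handled by
 * enable_tmem() above; the cleancache/frontswap/selfballooning/selfshrinking
 * knobs are read-only module parameters, so e.g. booting with
 * "tmem tmem.frontswap=0" (assuming the module/parameter prefix is "tmem")
 * keeps the frontswap backend off while leaving cleancache enabled.
 */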
#define TMEM_CONTROL		0
#define TMEM_NEW_POOL		1
#define TMEM_DESTROY_POOL	2
#define TMEM_NEW_PAGE		3
#define TMEM_PUT_PAGE		4
#define TMEM_GET_PAGE		5
#define TMEM_FLUSH_PAGE		6
#define TMEM_FLUSH_OBJECT	7
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST		1
#define TMEM_POOL_SHARED		2
#define TMEM_POOL_PAGESIZE_SHIFT	4
#define TMEM_VERSION_SHIFT		24
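
/*
 * Illustrative layout of the TMEM_NEW_POOL flags word, as assembled by
 * xen_tmem_new_pool() below: bit 0 = persistent, bit 1 = shared, a
 * page-order-above-4K field starting at bit 4 (0 for 4 KiB pages, since
 * pageshift - 12 == 0), and the tmem spec version from bit 24 up.  A
 * private persistent 4 KiB pool thus ends up with
 * TMEM_POOL_PERSIST | (TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT).
 */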
struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }
/* flags for tmem_ops.new_pool */
#define TMEM_POOL_PERSIST	1
#define TMEM_POOL_SHARED	2
/* xen tmem foundation ops/hypercalls */
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
/* xen generic tmem ops */
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}
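
/*
 * cleancache ops: these hooks let the page cache hand clean, reclaimable
 * pages to an ephemeral tmem pool in the hypervisor and ask for them back
 * on a page-cache miss; each hook maps directly onto one of the foundation
 * ops above (put/get/flush page, flush object, destroy pool).
 */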
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0 || ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}
static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	if (pool < 0 || ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	/* translate return values to linux semantics */
	return ret == 1 ? 0 : -1;
}
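
/*
 * Note on return values (per the "translate" comments in this file): the
 * Xen tmem hypercall reports a successful get/put as 1, while cleancache
 * and frontswap callers expect 0 on success and -1 on failure, hence the
 * "ret == 1 ? 0 : -1" translation in the get/load and store paths.
 */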
static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0 || ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}
static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif
#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;
/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)
static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };

	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
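
/*
 * Worked example (illustrative): with SWIZ_BITS == 4, swap type 1 and page
 * offset 0x37 give oid.oid[0] = (1 << 4) | (0x37 & 0xf) = 0x17 and tmem
 * index = 0x37 >> 4 = 3, so consecutive swap offsets fan out across 16
 * tmem objects per swap type.
 */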
/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0 || ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	return ret == 1 ? 0 : -1;
}
/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
			       struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0 || ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	return ret == 1 ? 0 : -1;
}
/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0 || ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}
/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}
static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
		    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
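
/*
 * Design note: frontswap pages are the only copy of the swapped-out data,
 * so the pool is created with TMEM_POOL_PERSIST; the cleancache pools above
 * are created without it (ephemeral) because a dropped page can always be
 * re-read from the backing filesystem.
 */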
static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif
static int xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && frontswap) {
		char *s = "";
		struct frontswap_ops *old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (IS_ERR(old_ops) || old_ops) {
			if (IS_ERR(old_ops))
				return PTR_ERR(old_ops);
			s = " (WARNING: frontswap_ops overridden)";
		}
		printk(KERN_INFO "frontswap enabled, RAM provided by "
				 "Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && cleancache) {
		char *s = "";
		struct cleancache_ops *old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);

		if (old_ops)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
				 "Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	xen_selfballoon_init(selfballooning, selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");