#ifndef _LINUX_MM_H
#define _LINUX_MM_H
-#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/capability.h>
#ifdef __KERNEL__
struct mempolicy;
struct anon_vma;
+struct user_struct;
#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern void * high_memory;
-extern unsigned long vmalloc_earlyreserve;
extern int page_cluster;
#ifdef CONFIG_SYSCTL
static inline struct page *compound_head(struct page *page)
{
- /*
- * We could avoid the PageCompound(page) check if
- * we would not overload PageTail().
- *
- * This check has to be done in several performance critical
- * paths of the slab etc. IMHO PageTail deserves its own flag.
- */
- if (unlikely(PageCompound(page) && PageTail(page)))
+ if (unlikely(PageTail(page)))
return page->first_page;
return page;
}
atomic_inc(&page->_count);
}
+static inline struct page *virt_to_head_page(const void *x)
+{
+ struct page *page = virt_to_page(x);
+ return compound_head(page);
+}
+
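/*
 * Illustrative sketch, not part of this patch: virt_to_head_page() is a
 * shorthand for virt_to_page() followed by compound_head(), so a caller
 * that may be handed the interior of a compound allocation always gets
 * the head page back.  'example_owner_page' and 'buf' are hypothetical.
 */
static inline struct page *example_owner_page(const void *buf)
{
	/* same result as compound_head(virt_to_page(buf)) */
	return virt_to_head_page(buf);
}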
/*
* Setup the page count before being freed into the page allocator for
* the first time (boot or memory hotplug)
static inline int compound_order(struct page *page)
{
- if (!PageCompound(page) || PageTail(page))
+ if (!PageHead(page))
return 0;
return (unsigned long)page[1].lru.prev;
}
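/*
 * Illustrative sketch, not part of this patch: the order stored in
 * page[1].lru.prev is the allocation order of the compound page, so its
 * size in bytes follows directly from it.  'example_compound_size' is a
 * hypothetical helper.
 */
static inline unsigned long example_compound_size(struct page *page)
{
	/* a head page of order N covers (1 << N) base pages */
	return PAGE_SIZE << compound_order(page);
}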
{
struct address_space *mapping = page->mapping;
+ VM_BUG_ON(PageSlab(page));
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
+#ifdef CONFIG_SLUB
+ else if (unlikely(PageSlab(page)))
+ mapping = NULL;
+#endif
else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
unsigned long flags);
#endif
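/*
 * Illustrative sketch, not part of this patch: with the page_mapping()
 * hunk above, anonymous pages and (under CONFIG_SLUB) slab pages report
 * a NULL mapping, while swap-cache pages report &swapper_space, so a
 * caller can tell whether a page belongs to an address_space.
 * 'example_page_has_mapping' is a hypothetical helper.
 */
static inline int example_page_has_mapping(struct page *page)
{
	return page_mapping(page) != NULL;
}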
-static inline int can_do_mlock(void)
-{
- if (capable(CAP_IPC_LOCK))
- return 1;
- if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
- return 1;
- return 0;
-}
+extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);
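/*
 * Sketch, not part of this patch: the out-of-line definition implied by
 * the new prototype presumably lives in a .c file (mm/mlock.c would be
 * the natural home) and mirrors the inline body removed above:
 *
 *	int can_do_mlock(void)
 *	{
 *		if (capable(CAP_IPC_LOCK))
 *			return 1;
 *		if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
 *			return 1;
 *		return 0;
 *	}
 */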
unsigned long flags, unsigned long new_addr);
/*
- * Prototype to add a shrinker callback for ageable caches.
- *
- * These functions are passed a count `nr_to_scan' and a gfpmask. They should
- * scan `nr_to_scan' objects, attempting to free them.
+ * A callback you can register to apply pressure to ageable caches.
*
- * The callback must return the number of objects which remain in the cache.
+ * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
+ * look through the least-recently-used 'nr_to_scan' entries and
+ * attempt to free them up. It should return the number of objects
+ * which remain in the cache. If it returns -1, it means it cannot do
+ * any scanning at this time (eg. there is a risk of deadlock).
*
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
- * cache size, so a fastpath for that case is appropriate.
- */
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
-
-/*
- * Add an aging callback. The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
+ *
+ * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+ * querying the cache size, so a fastpath for that case is appropriate.
*/
+struct shrinker {
+ int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+ int seeks; /* seeks to recreate an obj */
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+ /* These are for internal use */
+ struct list_head list;
+ long nr; /* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
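/*
 * Illustrative sketch, not part of this patch: with the embedded
 * struct shrinker above, a cache declares its shrinker statically and
 * hands it to register_shrinker() instead of calling set_shrinker().
 * 'example_shrink', 'example_shrinker' and 'example_cache_count' are
 * hypothetical names.
 */
static int example_cache_count;		/* objects currently cached */

static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan) {
		/* scan up to nr_to_scan LRU entries and free what we can;
		 * return -1 instead if scanning is unsafe right now */
	}
	return example_cache_count;	/* objects that remain */
}

static struct shrinker example_shrinker = {
	.shrink	= example_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) at init time,
 * unregister_shrinker(&example_shrinker) on teardown. */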
/*
 * Some shared mappings will want the pages marked read-only
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff);
+extern unsigned long mmap_region(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long flags,
+ unsigned int vm_flags, unsigned long pgoff,
+ int accountable);
static inline unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,