DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
-DEFINE_PER_CPU(long, __pgtable_quicklist_size);
-
extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
struct page *zero_page_memmap_ptr; /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
-#define MIN_PGT_PAGES 25UL
-#define MAX_PGT_FREES_PER_PASS 16L
-#define PGT_FRACTION_OF_NODE_MEM 16
-
-static inline long
-max_pgt_pages(void)
-{
- u64 node_free_pages, max_pgt_pages;
-
-#ifndef CONFIG_NUMA
- node_free_pages = nr_free_pages();
-#else
- node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
-#endif
- max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
- max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
- return max_pgt_pages;
-}
-
-static inline long
-min_pages_to_free(void)
-{
- long pages_to_free;
-
- pages_to_free = pgtable_quicklist_size - max_pgt_pages();
- pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
- return pages_to_free;
-}
-
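+/*
+ * Keep the i-cache coherent with the d-cache for a page before user code
+ * can execute from it.  PG_arch_1 records that the page has already been
+ * flushed, so the (potentially large, for compound pages) flush is done
+ * at most once per page.
+ */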
void
-check_pgt_cache(void)
-{
- long pages_to_free;
-
- if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
- return;
-
- preempt_disable();
- while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
- while (pages_to_free--) {
- free_page((unsigned long)pgtable_quicklist_alloc());
- }
- preempt_enable();
- preempt_disable();
- }
- preempt_enable();
-}
-
-void
-lazy_mmu_prot_update (pte_t pte)
+__ia64_sync_icache_dcache (pte_t pte)
{
unsigned long addr;
struct page *page;
unsigned long order;
- if (!pte_exec(pte))
- return; /* not an executable page... */
-
page = pte_page(pte);
addr = (unsigned long) page_address(page);
if (test_bit(PG_arch_1, &page->flags))
	return; /* i-cache is already coherent with d-cache */
if (PageCompound(page)) {
- order = (unsigned long) (page[1].lru.prev);
+ order = compound_order(page);
flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
}
else
	flush_icache_range(addr, addr + PAGE_SIZE);
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
- current->thread.rbs_bot = STACK_TOP - stack_size;
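+ /* put the bottom of the register backing store stack_size bytes below the stack base, page aligned */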
+ current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
/*
vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
- vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
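+ /* set vm_flags before deriving vm_page_prot from it */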
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
void __devinit
ia64_mmu_init (void *my_cpu_data)
{
- unsigned long psr, pta, impl_va_bits;
+ unsigned long pta, impl_va_bits;
extern void __devinit tlb_init (void);
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
#else
# define VHPT_ENABLE_BIT 1
#endif
- /* Pin mapping for percpu area into TLB */
- psr = ia64_clear_ic();
- ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
- pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
- PERCPU_PAGE_SHIFT);
-
- ia64_set_psr(psr);
- ia64_srlz_i();
-
/*
* Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
* address space. The IA-64 architecture guarantees that at least 50 bits of
return 0;
}
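+/*
+ * Grow the global [min_low_pfn, max_low_pfn] span to cover one physical
+ * memory range.  With FLATMEM both ends are rounded to page frames; with
+ * a virtual/discontiguous mem_map the range is rounded out to granule
+ * boundaries so whole granules are covered.  The start/end/arg signature
+ * is that of a memory-map walk callback; returning 0 continues the walk.
+ */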
+int
+find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
+{
+ unsigned long pfn_start, pfn_end;
+#ifdef CONFIG_FLATMEM
+ pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
+ pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
+#else
+ pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
+ pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
+#endif
+ min_low_pfn = min(min_low_pfn, pfn_start);
+ max_low_pfn = max(max_low_pfn, pfn_end);
+ return 0;
+}
+
/*
* Boot command-line option "nolwsys" can be used to disable the use of any light-weight
* system call handler. When this option is in effect, all fsyscalls will end up bubbling
return ret;
}
-
+#ifdef CONFIG_MEMORY_HOTREMOVE
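+/*
+ * Take the pages in [start, start + size) offline: offline_pages()
+ * isolates and migrates their contents, giving up after the 120 second
+ * timeout (in jiffies) if the range cannot be emptied.  Freeing the
+ * now-unused mem_map remains a TODO, as the comment below notes.
+ */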
int remove_memory(u64 start, u64 size)
{
- return -EINVAL;
+ unsigned long start_pfn, end_pfn;
+ unsigned long timeout = 120 * HZ;
+ int ret;
+ start_pfn = start >> PAGE_SHIFT;
+ end_pfn = start_pfn + (size >> PAGE_SHIFT);
+ ret = offline_pages(start_pfn, end_pfn, timeout);
+ if (ret)
+ goto out;
+ /* we can free mem_map at this point */
+out:
+ return ret;
}
EXPORT_SYMBOL_GPL(remove_memory);
+#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif