]> Git Repo - linux.git/blame - include/asm-powerpc/page_64.h
[POWERPC] Advertise correct IDE mode on Pegasos2
[linux.git] / include / asm-powerpc / page_64.h
CommitLineData
5cd16ee9
ME
1#ifndef _ASM_POWERPC_PAGE_64_H
2#define _ASM_POWERPC_PAGE_64_H
88ced031 3#ifdef __KERNEL__
5cd16ee9
ME
4
5/*
6 * Copyright (C) 2001 PPC64 Team, IBM Corp
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14/*
15 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
16 * specific, every notion of page number shared with the firmware, TCEs,
17 * iommu, etc... still uses a page size of 4K.
18 */
19#define HW_PAGE_SHIFT 12
20#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
21#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
22
23/*
24 * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
25 * HW_PAGE_SHIFT, that is 4K pages.
26 */
27#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
28
5cd16ee9
ME
29/* Segment size */
30#define SID_SHIFT 28
31#define SID_MASK 0xfffffffffUL
32#define ESID_MASK 0xfffffffff0000000UL
33#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
34
35#ifndef __ASSEMBLY__
36#include <asm/cache.h>
37
38typedef unsigned long pte_basic_t;
39
/*
 * Zero one Linux page (PAGE_SIZE bytes) using the dcbz (Data Cache
 * Block Zero) instruction, one cache line per iteration.  The line
 * size and the number of lines per page come from the runtime-probed
 * ppc64_caches descriptor, so this works for any cache-line size.
 */
static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = ppc64_caches.dline_size;	/* bytes per D-cache line */
	lines = ppc64_caches.dlines_per_page;	/* D-cache lines per page */

	/*
	 * CTR = number of lines; each pass zeroes one line with dcbz and
	 * advances addr by line_size.  "0"(addr) ties the input to the
	 * "=r"(addr) output so the pointer is updated in place; "memory"
	 * clobber keeps the compiler from caching page contents across
	 * the asm.
	 */
	__asm__ __volatile__(
	"mtctr	%1	# clear_page\n\
1:      dcbz	0,%0\n\
	add	%0,%0,%3\n\
	bdnz+	1b"
	: "=r" (addr)
	: "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}
56
57extern void copy_4K_page(void *to, void *from);
58
59#ifdef CONFIG_PPC_64K_PAGES
60static inline void copy_page(void *to, void *from)
61{
62 unsigned int i;
63 for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
64 copy_4K_page(to, from);
65 to += 4096;
66 from += 4096;
67 }
68}
69#else /* CONFIG_PPC_64K_PAGES */
/*
 * With 4K Linux pages, one Linux page is exactly one 4K hardware
 * page, so a single copy_4K_page() call suffices.
 */
static inline void copy_page(void *to, void *from)
{
	copy_4K_page(to, from);
}
74#endif /* CONFIG_PPC_64K_PAGES */
75
76/* Log 2 of page table size */
77extern u64 ppc64_pft_size;
78
79/* Large pages size */
b50ce232 80#ifdef CONFIG_HUGETLB_PAGE
5cd16ee9 81extern unsigned int HPAGE_SHIFT;
b50ce232
AW
82#else
83#define HPAGE_SHIFT PAGE_SHIFT
84#endif
5cd16ee9
ME
85#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
86#define HPAGE_MASK (~(HPAGE_SIZE - 1))
87#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
88
89#endif /* __ASSEMBLY__ */
90
d0f13e3c 91#ifdef CONFIG_PPC_MM_SLICES
5cd16ee9 92
d0f13e3c
BH
93#define SLICE_LOW_SHIFT 28
94#define SLICE_HIGH_SHIFT 40
5cd16ee9 95
d0f13e3c
BH
96#define SLICE_LOW_TOP (0x100000000ul)
97#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
98#define SLICE_NUM_HIGH (PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
5cd16ee9 99
d0f13e3c
BH
100#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
101#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
5cd16ee9 102
d0f13e3c
BH
103#ifndef __ASSEMBLY__
104
/*
 * Bitmaps of address-space slices.
 *
 * low_slices:  one bit per 256MB slice below 4GB — SLICE_NUM_LOW
 *              (0x100000000 >> 28 = 16) bits, so a u16 is exact.
 * high_slices: one bit per 1TB (1 << SLICE_HIGH_SHIFT) slice above —
 *              SLICE_NUM_HIGH bits; NOTE(review): assumes
 *              PGTABLE_RANGE >> SLICE_HIGH_SHIFT fits in 16 bits —
 *              confirm against the pgtable definitions.
 */
struct slice_mask {
	u16 low_slices;
	u16 high_slices;
};
109
110struct mm_struct;
5cd16ee9 111
d0f13e3c
BH
112extern unsigned long slice_get_unmapped_area(unsigned long addr,
113 unsigned long len,
114 unsigned long flags,
115 unsigned int psize,
116 int topdown,
117 int use_cache);
5cd16ee9 118
d0f13e3c
BH
119extern unsigned int get_slice_psize(struct mm_struct *mm,
120 unsigned long addr);
5cd16ee9 121
d0f13e3c
BH
122extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
123extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
e8ff0646 124#define slice_mm_new_context(mm) ((mm)->context.id == 0)
d0f13e3c
BH
125
126#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
127extern int is_hugepage_only_range(struct mm_struct *m,
128 unsigned long addr,
129 unsigned long len);
130
131#endif /* __ASSEMBLY__ */
132#else
133#define slice_init()
e8ff0646
SR
134#define slice_set_user_psize(mm, psize) \
135do { \
136 (mm)->context.user_psize = (psize); \
137 (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
138} while (0)
139#define slice_mm_new_context(mm) 1
d0f13e3c
BH
140#endif /* CONFIG_PPC_MM_SLICES */
141
142#ifdef CONFIG_HUGETLB_PAGE
143
144#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
145#define ARCH_HAS_SETCLEAR_HUGE_PTE
146#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
5cd16ee9
ME
147
#endif /* CONFIG_HUGETLB_PAGE */
149
150#ifdef MODULE
151#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
152#else
153#define __page_aligned \
154 __attribute__((__aligned__(PAGE_SIZE), \
155 __section__(".data.page_aligned")))
156#endif
157
158#define VM_DATA_DEFAULT_FLAGS \
159 (test_thread_flag(TIF_32BIT) ? \
160 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
161
162/*
163 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable stack
 * by default, so in the absence of a PT_GNU_STACK program header
166 * we turn execute permission off.
167 */
168#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
169 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
170
171#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
172 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
173
174#define VM_STACK_DEFAULT_FLAGS \
175 (test_thread_flag(TIF_32BIT) ? \
176 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
177
178#include <asm-generic/page.h>
179
88ced031 180#endif /* __KERNEL__ */
5cd16ee9 181#endif /* _ASM_POWERPC_PAGE_64_H */
This page took 0.266033 seconds and 4 git commands to generate.