ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
S: Maintained
F: arch/arm/mach-prima2/
F: drivers/dma/sirf-dma.c
S: Maintained
F: arch/arm/mach-s5p*/
F: arch/arm/mach-exynos*/
+N: exynos
ARM/SAMSUNG MOBILE MACHINE SUPPORT
S: Maintained
F: drivers/clk/socfpga/
+ARM/STI ARCHITECTURE
+W: http://www.stlinux.com
+S: Maintained
+F: arch/arm/mach-sti/
+
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
ECRYPT FILE SYSTEM
+W: http://ecryptfs.org
W: https://launchpad.net/ecryptfs
S: Supported
F: Documentation/filesystems/ecryptfs.txt
FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
W: www.Open-FCoE.org
S: Supported
F: drivers/scsi/libfc/
S: Odd fixes
F: drivers/block/floppy.c
+FMC SUBSYSTEM
+W: http://www.ohwr.org/projects/fmc-bus
+S: Supported
+F: drivers/fmc/
+F: include/linux/fmc*.h
+F: include/linux/ipmi-fru.h
+K: fmc_d.*register
+
FPU EMULATOR
W: http://floatingpoint.sourceforge.net/emulator/index.html
F: drivers/scsi/*iscsi*
F: include/scsi/*iscsi*
+ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
+S: Supported
+W: http://www.openfabrics.org
+W: www.open-iscsi.org
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+F: drivers/infiniband/ulp/iser
+
ISDN SUBSYSTEM
F: include/linux/jbd2.h
JSM Neo PCI based serial card
-M: Lucas Tavares <lucaskt@linux.vnet.ibm.com>
+M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
S: Maintained
F: drivers/tty/serial/jsm/
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
+ KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+ S: Maintained
+ F: arch/arm64/include/uapi/asm/kvm*
+ F: arch/arm64/include/asm/kvm*
+ F: arch/arm64/kvm/
+
KEXEC
W: http://kernel.org/pub/linux/utils/kernel/kexec/
T: git git://git.infradead.org/users/willy/linux-nvme.git
S: Supported
-F: drivers/block/nvme.c
+F: drivers/block/nvme*
F: include/linux/nvme.h
OMAP SUPPORT
SPI SUBSYSTEM
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
S: Maintained
S: Supported
+F: Documentation/stable_kernel_rules.txt
STAGING SUBSYSTEM
STAGING - SPEAKUP CONSOLE SPEECH DRIVER
-M: Kirk Reiser <kirk@braille.uwo.ca>
+M: Kirk Reiser <kirk@reisers.ca>
W: http://www.linux-speakup.org/
F: drivers/net/wireless/wl3501*
WM97XX TOUCHSCREEN DRIVERS
-M: Mark Brown <broonie@opensource.wolfsonmicro.com>
+M: Mark Brown <broonie@kernel.org>
T: git git://opensource.wolfsonmicro.com/linux-2.6-touch
F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS
T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
F: arch/arm/xen/
F: arch/arm/include/asm/xen/
+ XEN HYPERVISOR ARM64
+ S: Supported
+ F: arch/arm64/xen/
+ F: arch/arm64/include/asm/xen/
+
XEN NETWORK BACKEND DRIVER
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
- #define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
- #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
+ #define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */
+ #define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
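Moving the two software bits up by one position frees bit 1, the hardware table bit that the huge-page helpers below now test to tell block (huge) entries from table entries. A minimal sketch of the resulting low-bit layout, inferred from the definitions above rather than quoted from the header:

/*
 * bit 0: PTE_VALID      - hardware valid bit
 * bit 1: PTE_TABLE_BIT  - hardware table vs. block (huge) entry
 * bit 2: PTE_PROT_NONE  - software, only when !PTE_VALID
 * bit 3: PTE_FILE       - software, only when !pte_present()
 */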
#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b)
- #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
+ #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
- #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
+ #define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP)
+ #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+
+ #define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+ #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+
+ #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
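For readers without the full header at hand: __pgprot_modify(prot, mask, bits), used throughout the block above, clears `mask` from the prot value and then ORs in `bits`. A minimal sketch consistent with that usage (an assumption, not a quotation of the header):

#define __pgprot_modify(prot, mask, bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))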
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
- #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
+ #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
/*
* Huge pte definitions.
*/
- #define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
- #define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
+ #define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT))
+ #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+ /*
+ * Hugetlb definitions.
+ */
+ #define HUGE_MAX_HSTATE 2
+ #define HPAGE_SHIFT PMD_SHIFT
+ #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+ #define HPAGE_MASK (~(HPAGE_SIZE - 1))
+ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
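A worked example of the constants above, assuming a 4 KiB granule (PAGE_SHIFT = 12) where PMD_SHIFT = 21:

/*
 * HPAGE_SHIFT        = PMD_SHIFT = 21
 * HPAGE_SIZE         = 1UL << 21 = 2 MiB
 * HPAGE_MASK         = ~(2 MiB - 1), the 2 MiB alignment mask
 * HUGETLB_PAGE_ORDER = 21 - 12 = 9, i.e. 512 base pages per huge page
 */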
#define __HAVE_ARCH_PTE_SPECIAL
+ /*
+ * Software PMD bits for THP
+ */
+
+ #define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+ #define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57)
+
+ /*
+ * THP definitions.
+ */
+ #define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
+
+ #define __HAVE_ARCH_PMD_WRITE
+ #define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+ #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+ #endif
+
+ #define PMD_BIT_FUNC(fn,op) \
+ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+ PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
+ PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
+ PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+ PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
+ PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
+ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+ PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+
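Expanding one instance of the PMD_BIT_FUNC() generator by hand shows the shape of all of them (equivalent code for illustration, not an addition to the patch):

/* PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF) becomes: */
static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_SECT_AF;	/* clear the Access Flag */
	return pmd;
}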
+ #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+ #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+ #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+ #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ {
+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
+ PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
+ PMD_SECT_VALID;
+ pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+ return pmd;
+ }
+
+ #define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd)
+
+ static inline int has_transparent_hugepage(void)
+ {
+ return 1;
+ }
+
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+ #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_TABLE)
+ #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
+
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
#endif
/* Find an entry in the third-level page table.. */
- #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
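With the generic name in place, the index is simply the PTE-level address bits; a worked example assuming a 4 KiB granule and 512-entry tables:

/*
 * pte_index(0x12345678) = (0x12345678 >> 12) & 511
 *                       = 0x12345 & 0x1ff = 0x145
 */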
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
/*
* Encode and decode a swap entry:
- * bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-8: swap type
- * bits 9-63: swap offset
+ * bits 0, 2: present (must both be zero)
+ * bit 3: PTE_FILE
+ * bits 4-9: swap type
+ * bits 10-63: swap offset
*/
- #define __SWP_TYPE_SHIFT 3
+ #define __SWP_TYPE_SHIFT 4
#define __SWP_TYPE_BITS 6
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
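A hedged sketch of how the fields compose under the new layout, following the generic __swp_entry() pattern (the companion macros sit outside this hunk):

/*
 * __swp_entry(type, offset) == (type << __SWP_TYPE_SHIFT) |
 *                              (offset << __SWP_OFFSET_SHIFT)
 *
 * e.g. type = 1, offset = 0x2a:
 *      (1 << 4) | (0x2a << 10) = 0xa810
 * Bits 0 and 2 stay clear, so pte_present() sees the entry as
 * not-present, and bit 3 (PTE_FILE) stays clear as well.
 */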
/*
* Encode and decode a file entry:
- * bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-63: file offset / PAGE_SIZE
+ * bits 0, 2: present (must both be zero)
+ * bit 3: PTE_FILE
+ * bits 4-63: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
- #define pte_to_pgoff(x) (pte_val(x) >> 3)
- #define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
+ #define pte_to_pgoff(x) (pte_val(x) >> 4)
+ #define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
- #define PTE_FILE_MAX_BITS 61
+ #define PTE_FILE_MAX_BITS 60
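A quick round-trip check of the file-pte encoding, with PTE_FILE now at bit 3:

/*
 * pgoff_to_pte(5) = (5 << 4) | PTE_FILE = 0x50 | 0x8 = 0x58
 * pte_to_pgoff(__pte(0x58)) = 0x58 >> 4 = 5  (the shift drops PTE_FILE)
 */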
extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
- remap_pfn_range(vma, from, pfn, size, prot)
-
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
config ARCH_SUSPEND_POSSIBLE
def_bool y
+ config ARCH_WANT_HUGE_PMD_SHARE
+ def_bool y
+
+ config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
config ZONE_DMA32
bool
default X86_64
If you enable this option then you'll be able to select support
for the following (non-PC) 32 bit x86 platforms:
+ Goldfish (Android emulator)
AMD Elan
NUMAQ (IBM/Sequent)
RDC R-321x SoC
config X86_GOLDFISH
bool "Goldfish (Virtual Platform)"
depends on X86_32
+ depends on X86_EXTENDED_PLATFORM
---help---
Enable support for the Goldfish virtual platform used primarily
for Android development. Unless you are building for the Android
depends on MICROCODE_INTEL
config MICROCODE_INTEL_EARLY
+ def_bool n
+
+config MICROCODE_AMD_EARLY
+ def_bool n
+
+config MICROCODE_EARLY
bool "Early load microcode"
- depends on MICROCODE_INTEL && BLK_DEV_INITRD
+ depends on MICROCODE=y && BLK_DEV_INITRD
+ select MICROCODE_INTEL_EARLY if MICROCODE_INTEL
+ select MICROCODE_AMD_EARLY if MICROCODE_AMD
default y
help
This option provides functionality to read additional microcode data
microcode to CPUs as early as possible. No functional change if no
microcode data is glued to the initrd, therefore it's safe to say Y.
-config MICROCODE_EARLY
- def_bool y
- depends on MICROCODE_INTEL_EARLY
-
config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
---help---
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
- depends on SMP && HOTPLUG
+ depends on SMP
---help---
Say Y here to allow turning CPUs off and on. CPUs can be
controlled through /sys/devices/system/cpu.
config IA32_EMULATION
bool "IA32 Emulation"
depends on X86_64
+ select BINFMT_ELF
select COMPAT_BINFMT_ELF
select HAVE_UID16
---help---
int dequeue_hwpoisoned_huge_page(struct page *page);
void copy_huge_page(struct page *dst, struct page *src);
+ #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+ #endif
+
extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
return h - hstates;
}
+pgoff_t __basepage_index(struct page *page);
+
+/* Return page->index in PAGE_SIZE units */
+static inline pgoff_t basepage_index(struct page *page)
+{
+ if (!PageCompound(page))
+ return page->index;
+
+ return __basepage_index(page);
+}
+
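A worked example of the conversion, assuming 2 MiB huge pages (compound_order() == 9); the futex-key code is, as far as we can tell, the motivating caller of this helper:

/*
 * A huge page at huge-page index 3 covers base-page indices
 * 3 << 9 = 1536 .. 2047. For its tail page at compound offset 5,
 * basepage_index() returns 1541, while page->index on the head
 * page is still 3.
 */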
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0
+
+static inline pgoff_t basepage_index(struct page *page)
+{
+ return page->index;
+}
#endif /* CONFIG_HUGETLB_PAGE */
#endif /* _LINUX_HUGETLB_H */
#define KVM_CAP_IRQ_MPIC 90
#define KVM_CAP_PPC_RTAS 91
#define KVM_CAP_IRQ_XICS 92
+ #define KVM_CAP_ARM_EL1_32BIT 93
#ifdef KVM_CAP_IRQ_ROUTING
#define KVM_REG_IA64 0x3000000000000000ULL
#define KVM_REG_ARM 0x4000000000000000ULL
#define KVM_REG_S390 0x5000000000000000ULL
+ #define KVM_REG_ARM64 0x6000000000000000ULL
+#define KVM_REG_MIPS 0x7000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
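The architecture prefixes occupy the top nibble of a register id; a hedged sketch of how userspace composes one (KVM_REG_SIZE_U64 comes from the same ABI header):

/*
 * __u64 id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | <arch-specific index>;
 *
 * bits 60-63 carry the architecture, bits 52-55 the size
 * (KVM_REG_SIZE_MASK / KVM_REG_SIZE_SHIFT above).
 */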
}
EXPORT_SYMBOL_GPL(PageHuge);
+pgoff_t __basepage_index(struct page *page)
+{
+ struct page *page_head = compound_head(page);
+ pgoff_t index = page_index(page_head);
+ unsigned long compound_idx;
+
+ if (!PageHuge(page_head))
+ return page_index(page);
+
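+	/* Gigantic page (order >= MAX_ORDER): the memmap may not be
+	 * virtually contiguous, so compute the offset via pfns. */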
+ if (compound_order(page_head) >= MAX_ORDER)
+ compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
+ else
+ compound_idx = page - page_head;
+
+ return (index << compound_order(page_head)) + compound_idx;
+}
+
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
- migration_entry_wait(mm, (pmd_t *)ptep, address);
+ migration_entry_wait_huge(mm, ptep);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
return ret;
}
- /* Can be overriden by architectures */
- __attribute__((weak)) struct page *
- follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
- {
- BUG();
- return NULL;
- }
-
long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, unsigned long *nr_pages,
hugetlb_acct_memory(h, -(chg - freed));
}
+ #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+ static unsigned long page_table_shareable(struct vm_area_struct *svma,
+ struct vm_area_struct *vma,
+ unsigned long addr, pgoff_t idx)
+ {
+ unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
+ svma->vm_start;
+ unsigned long sbase = saddr & PUD_MASK;
+ unsigned long s_end = sbase + PUD_SIZE;
+
+ /* Mask out VM_LOCKED so segments may share even if only one is locked */
+ unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
+ unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
+
+ /*
+ * Match the virtual addresses, permissions and the alignment of the
+ * page table page.
+ */
+ if (pmd_index(addr) != pmd_index(saddr) ||
+ vm_flags != svm_flags ||
+ sbase < svma->vm_start || svma->vm_end < s_end)
+ return 0;
+
+ return saddr;
+ }
+
+ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ {
+ unsigned long base = addr & PUD_MASK;
+ unsigned long end = base + PUD_SIZE;
+
+ /*
+ * check on proper vm_flags and page table alignment
+ */
+ if (vma->vm_flags & VM_MAYSHARE &&
+ vma->vm_start <= base && end <= vma->vm_end)
+ return 1;
+ return 0;
+ }
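A worked example of the address translation above: idx is the faulting vma's file offset of addr, so saddr is where the same file page lives in the candidate svma (a sketch, assuming 2 MiB pmds, 1 GiB puds, and both vmas mapping the file from offset 0):

/*
 * vma:  vm_start = 0x4000000000, vm_pgoff = 0
 * svma: vm_start = 0x7000000000, vm_pgoff = 0
 * addr = 0x4000200000 -> idx = 0x200000 >> 12 = 0x200
 * saddr = ((0x200 - 0) << 12) + 0x7000000000 = 0x7000200000
 *
 * pmd_index(addr) == pmd_index(saddr) == 1, so the pmd page may be
 * shared, provided both vmas cover the full surrounding PUD range.
 */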
+
+ /*
+ * Search for a shareable pmd page for hugetlb. In any case it calls
+ * pmd_alloc() and returns the corresponding pte. While this is not
+ * necessary for the !shared pmd case because we can allocate the pmd
+ * later as well, it makes the code much cleaner. pmd allocation is
+ * essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
+ */
+ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ {
+ struct vm_area_struct *vma = find_vma(mm, addr);
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+ vma->vm_pgoff;
+ struct vm_area_struct *svma;
+ unsigned long saddr;
+ pte_t *spte = NULL;
+ pte_t *pte;
+
+ if (!vma_shareable(vma, addr))
+ return (pte_t *)pmd_alloc(mm, pud, addr);
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
+ if (svma == vma)
+ continue;
+
+ saddr = page_table_shareable(svma, vma, addr, idx);
+ if (saddr) {
+ spte = huge_pte_offset(svma->vm_mm, saddr);
+ if (spte) {
+ get_page(virt_to_page(spte));
+ break;
+ }
+ }
+ }
+
+ if (!spte)
+ goto out;
+
+ spin_lock(&mm->page_table_lock);
+ if (pud_none(*pud))
+ pud_populate(mm, pud,
+ (pmd_t *)((unsigned long)spte & PAGE_MASK));
+ else
+ put_page(virt_to_page(spte));
+ spin_unlock(&mm->page_table_lock);
+ out:
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ mutex_unlock(&mapping->i_mmap_mutex);
+ return pte;
+ }
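A design note on huge_pmd_share(): the pte page's refcount doubles as the share count, and the lock ordering matters. A hedged summary of the protocol:

/*
 * 1. Find another vma (svma) of the same file mapping addr's range.
 * 2. get_page() svma's pte page while i_mmap_mutex is held, so the
 *    page cannot disappear before our pud is populated.
 * 3. Under page_table_lock, pud_populate() only if our pud is still
 *    none; otherwise drop the extra reference again.
 * 4. Always finish with pmd_alloc(), a no-op when step 3 already
 *    populated the pud.
 */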
+
+ /*
+ * unmap huge page backed by shared pte.
+ *
+ * The hugetlb pte page is refcounted at mapping time. If the pte is
+ * shared (indicated by page_count > 1), unmap is achieved by clearing
+ * the pud and decrementing the refcount. If count == 1, the pte page
+ * is not shared.
+ *
+ * called with vma->vm_mm->page_table_lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+ * 0 the underlying pte page is not shared, or it is the last user
+ */
+ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ {
+ pgd_t *pgd = pgd_offset(mm, *addr);
+ pud_t *pud = pud_offset(pgd, *addr);
+
+ BUG_ON(page_count(virt_to_page(ptep)) == 0);
+ if (page_count(virt_to_page(ptep)) == 1)
+ return 0;
+
+ pud_clear(pud);
+ put_page(virt_to_page(ptep));
+ *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+ return 1;
+ }
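The *addr adjustment at the end of huge_pmd_unshare() is easiest to see with numbers; a sketch assuming 2 MiB huge pages and 512-entry tables, so one pmd page spans HPAGE_SIZE * PTRS_PER_PTE = 1 GiB:

/*
 * *addr = 0x40200000, shared region = [0x40000000, 0x80000000):
 *   ALIGN(0x40200000, 0x40000000)  = 0x80000000
 *   - HPAGE_SIZE (0x200000)        = 0x7fe00000
 * The caller's unmap loop then advances by one huge page and resumes
 * at 0x80000000, just past the region whose pmd page was unshared.
 */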
+ #define want_pmd_share() (1)
+ #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ {
+ return NULL;
+ }
+ #define want_pmd_share() (0)
+ #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+
+ #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
+ pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+ {
+ pgd_t *pgd;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (pud) {
+ if (sz == PUD_SIZE) {
+ pte = (pte_t *)pud;
+ } else {
+ BUG_ON(sz != PMD_SIZE);
+ if (want_pmd_share() && pud_none(*pud))
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ }
+ }
+ BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+
+ return pte;
+ }
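The size dispatch above, spelled out for the common 4 KiB-granule geometry (the sizes are illustrative assumptions):

/*
 * sz == PUD_SIZE (1 GiB): the huge pte is the pud entry itself.
 * sz == PMD_SIZE (2 MiB): allocate a pmd, or share one via
 *        huge_pmd_share() when the vma allows it and the pud is empty.
 * Any other size hits the BUG_ON(): this generic path supports only
 * the two page-table-level huge sizes.
 */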
+
+ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+ {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_present(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (pud_present(*pud)) {
+ if (pud_huge(*pud))
+ return (pte_t *)pud;
+ pmd = pmd_offset(pud, addr);
+ }
+ }
+ return (pte_t *) pmd;
+ }
+
+ struct page *
+ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+ {
+ struct page *page;
+
+ page = pte_page(*(pte_t *)pmd);
+ if (page)
+ page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ return page;
+ }
+
+ struct page *
+ follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int write)
+ {
+ struct page *page;
+
+ page = pte_page(*(pte_t *)pud);
+ if (page)
+ page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
+ return page;
+ }
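The tail-page arithmetic in the two lookup helpers above, as a worked example for the 2 MiB pmd case:

/*
 * address = 0x40234000, PMD_MASK = ~(2 MiB - 1):
 *   address & ~PMD_MASK   = 0x34000  (byte offset into the huge page)
 *   0x34000 >> PAGE_SHIFT = 0x34     (offset in base pages)
 * The returned struct page is head + 0x34, the base page actually
 * backing `address`.
 */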
+
+ #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
+ /* Can be overridden by architectures */
+ __attribute__((weak)) struct page *
+ follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int write)
+ {
+ BUG();
+ return NULL;
+ }
+
+ #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
#ifdef CONFIG_MEMORY_FAILURE
/* Should be called in hugetlb_lock */