/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

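/*
 * Mask of the virtual-address bits that select a cache colour.  The
 * PAGE_SIZE - 1 default ("sane", non-aliasing caches) imposes nothing
 * beyond page alignment; parts whose D-caches can alias are expected
 * to widen this mask during cache setup to cover the alias span.
 */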
unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
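/*
 * Page protections for each combination of the VM_READ, VM_WRITE,
 * VM_EXEC and VM_SHARED flag bits used to index the table.  Private
 * writable entries map to PAGE_COPY so that the first write faults
 * and is satisfied by copy-on-write.  DECLARE_VM_GET_PAGE_PROT below
 * expands to the generic vm_get_page_prot() helper that performs the
 * lookup.
 */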
static const pgprot_t protection_map[16] = {
        [VM_NONE]                                       = PAGE_NONE,
        [VM_READ]                                       = PAGE_READONLY,
        [VM_WRITE]                                      = PAGE_COPY,
        [VM_WRITE | VM_READ]                            = PAGE_COPY,
        [VM_EXEC]                                       = PAGE_EXECREAD,
        [VM_EXEC | VM_READ]                             = PAGE_EXECREAD,
        [VM_EXEC | VM_WRITE]                            = PAGE_COPY,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY,
        [VM_SHARED]                                     = PAGE_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_READONLY,
        [VM_SHARED | VM_WRITE]                          = PAGE_WRITEONLY,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED,
        [VM_SHARED | VM_EXEC]                           = PAGE_EXECREAD,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_EXECREAD,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_RWX,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT

/*
 * To avoid cache aliases, we map shared pages at addresses with the
 * same cache colour as their file offset.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
                                         unsigned long pgoff)
{
        unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        return base + off;
}
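
/*
 * Worked example with hypothetical values: given 4 KiB pages and a
 * 16 KiB alias span (shm_align_mask == 0x3fff),
 * COLOUR_ALIGN(0x10001000, 3) rounds the hint up to the alias
 * boundary 0x10004000 and adds the offset's colour,
 * (3 << PAGE_SHIFT) & 0x3fff == 0x3000, returning 0x10007000, an
 * address with the same colour as the file offset.
 */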

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_colour_align;
        struct vm_unmapped_area_info info = {};

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

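        /*
         * File-backed mappings may share page-cache pages with other
         * mappings of the same file (even MAP_PRIVATE, until a write
         * triggers COW), and MAP_SHARED mappings alias by definition,
         * so both cases need colour alignment.
         */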
        do_colour_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_colour_align = 1;

        if (addr) {
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

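        /*
         * align_mask restricts the search to addresses whose
         * page-granular colour bits match align_offset, i.e. the
         * colour of the file offset being mapped.
         */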
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}
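
/*
 * Illustration (hypothetical user-space caller, not part of this file):
 * two MAP_SHARED mappings of the same file offset come back with the
 * same cache colour, e.g.
 *
 *      void *a = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *      void *b = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * leaves ((unsigned long)a & shm_align_mask) equal to
 * ((unsigned long)b & shm_align_mask), so the two mappings cannot
 * alias in the D-cache.
 */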

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_colour_align;
        struct vm_unmapped_area_info info = {};

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        do_colour_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_colour_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

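        /*
         * Search top-down below the current mmap base; a low limit of
         * PAGE_SIZE ensures we never hand out a mapping at address
         * zero.
         */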
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.  On failure vm_unmapped_area() returns an
         * errno value, which is never page-aligned, so testing the
         * low bits of the result detects it.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        if (addr < __MEMORY_START)
                return 0;
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

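/*
 * mmap() of /dev/mem is not range-limited here: any pfn may be
 * mapped, unlike the read()/write() path above, which is confined to
 * the [__MEMORY_START, __pa(high_memory)) window.
 */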
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}