/****************************************************************************
*
*                   SciTech OS Portability Manager Library
*
*  ========================================================================
*
*    The contents of this file are subject to the SciTech MGL Public
*    License Version 1.0 (the "License"); you may not use this file
*    except in compliance with the License. You may obtain a copy of
*    the License at http://www.scitechsoft.com/mgl-license.txt
*
*    Software distributed under the License is distributed on an
*    "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
*    implied. See the License for the specific language governing
*    rights and limitations under the License.
*
*    The Original Code is Copyright (C) 1991-1998 SciTech Software, Inc.
*
*    The Initial Developer of the Original Code is SciTech Software, Inc.
*    All Rights Reserved.
*
*  ========================================================================
*
* Language:     ANSI C
* Environment:  32-bit Windows NT device drivers.
*
* Description:  Implementation for the NT driver memory management functions
*               for the PM library.
*
****************************************************************************/

#include "pmapi.h"
#include "drvlib/os/os.h"
#include "sdd/sddhelp.h"
#include "mtrr.h"
#include "oshdr.h"

/*--------------------------- Global variables ----------------------------*/

#define MAX_MEMORY_SHARED       100
#define MAX_MEMORY_MAPPINGS     100
#define MAX_MEMORY_LOCKED       100

typedef struct {
    void    *linear;
    ulong   length;
    PMDL    pMdl;
    } memshared;

typedef struct {
    void    *linear;
    void    *mmIoMapped;
    ulong   length;
    PMDL    pMdl;
    } memlocked;

typedef struct {
    ulong   physical;
    ulong   linear;
    ulong   length;
    ibool   isCached;
    } mmapping;

static int          numMappings = 0;
static memshared    shared[MAX_MEMORY_SHARED] = {0};
static mmapping     maps[MAX_MEMORY_MAPPINGS];
static memlocked    locked[MAX_MEMORY_LOCKED];

/*----------------------------- Implementation ----------------------------*/

ulong PMAPI _PM_getPDB(void);

/* Page table entry flags */

#define PAGE_FLAGS_PRESENT          0x00000001
#define PAGE_FLAGS_WRITEABLE        0x00000002
#define PAGE_FLAGS_USER             0x00000004
#define PAGE_FLAGS_WRITE_THROUGH    0x00000008
#define PAGE_FLAGS_CACHE_DISABLE    0x00000010
#define PAGE_FLAGS_ACCESSED         0x00000020
#define PAGE_FLAGS_DIRTY            0x00000040
#define PAGE_FLAGS_4MB              0x00000080

/****************************************************************************
PARAMETERS:
base    - Physical base address of the memory to map in
limit   - Limit of the physical memory region to map in

RETURNS:
Linear address of the newly mapped memory, or 0 if the mapping failed.

REMARKS:
Maps a physical memory range to a linear memory range.
****************************************************************************/
static ulong _PM_mapPhysicalToLinear(
    ulong base,
    ulong limit,
    ibool isCached)
{
    ulong length = limit+1;
    PHYSICAL_ADDRESS paIoBase = {0};

    /* NT loves large Ints */
    paIoBase = RtlConvertUlongToLargeInteger( base );

    /* Map IO space into the kernel */
    if (isCached)
        return (ULONG)MmMapIoSpace(paIoBase, length, MmCached );
    else
        return (ULONG)MmMapIoSpace(paIoBase, length, MmNonCached );
}

/****************************************************************************
REMARKS:
Adjusts the page table flags for a range of linear memory directly. Since
this code runs at ring 0 inside an NT kernel mode driver, we can map the
page directory and page tables with MmMapIoSpace and modify the user access
and caching bits for every entry covering the range, flushing the TLB when
we are done.
****************************************************************************/
static void _PM_adjustPageTables(
    ulong linear,
    ulong limit,
    ibool isGlobal,
    ibool isCached)
{
    int     startPDB,endPDB,iPDB,startPage,endPage,start,end,iPage;
    ulong   pageTable,*pPDB,*pPageTable;
    ulong   mask = 0xFFFFFFFF;
    ulong   bits = 0x00000000;

    /* Enable user level access for the page table entries */
    if (isGlobal) {
        mask &= ~PAGE_FLAGS_USER;
        bits |= PAGE_FLAGS_USER;
        }

    /* Set the PCD and PWT bits if the page table entries should be uncached */
    if (!isCached) {
        mask &= ~(PAGE_FLAGS_CACHE_DISABLE | PAGE_FLAGS_WRITE_THROUGH);
        bits |= (PAGE_FLAGS_CACHE_DISABLE | PAGE_FLAGS_WRITE_THROUGH);
        }

    pPDB = (ulong*)_PM_mapPhysicalToLinear(_PM_getPDB(),0xFFF,true);
    if (pPDB) {
        startPDB = (linear >> 22) & 0x3FF;
        startPage = (linear >> 12) & 0x3FF;
        endPDB = ((linear+limit) >> 22) & 0x3FF;
        endPage = ((linear+limit) >> 12) & 0x3FF;
        for (iPDB = startPDB; iPDB <= endPDB; iPDB++) {
            /* Set the bits in the page directory entry - required as per */
            /* Pentium 4 manual. This also takes care of the 4MB page entries */
            pPDB[iPDB] = (pPDB[iPDB] & mask) | bits;
            if (!(pPDB[iPDB] & PAGE_FLAGS_4MB)) {
                /* If we are dealing with 4KB pages then we need to iterate */
                /* through each of the page table entries */
                pageTable = pPDB[iPDB] & ~0xFFF;
                pPageTable = (ulong*)_PM_mapPhysicalToLinear(pageTable,0xFFF,true);
                start = (iPDB == startPDB) ? startPage : 0;
                end = (iPDB == endPDB) ? endPage : 0x3FF;
                for (iPage = start; iPage <= end; iPage++) {
                    pPageTable[iPage] = (pPageTable[iPage] & mask) | bits;
                    }
                MmUnmapIoSpace(pPageTable,0xFFF);
                }
            }
        MmUnmapIoSpace(pPDB,0xFFF);
        PM_flushTLB();
        }
}

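/****************************************************************************
EXAMPLE:
Illustrative sketch only (not part of the original source): how a 32-bit
linear address decomposes into the indices used by _PM_adjustPageTables
above. The sample address is hypothetical.

    linear = 0xE1234567

    iPDB   = (linear >> 22) & 0x3FF = 0x384   page directory index
    iPage  = (linear >> 12) & 0x3FF = 0x234   page table index
    offset =  linear        & 0xFFF = 0x567   byte within the 4KB page

Each page directory entry covers 4MB (1024 x 4KB pages), so a range of
limit+1 bytes touches directory entries startPDB..endPDB and, within each
directory entry mapped with 4KB pages, the page table entries start..end.
****************************************************************************/
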
/****************************************************************************
REMARKS:
Allocate a block of shared memory. For NT we allocate shared memory
as locked, global memory that is accessible from any memory context
(including interrupt time context), which allows us to load our important
data structures and code such that we can access them directly from a ring
0 interrupt context.
****************************************************************************/
void * PMAPI PM_mallocShared(
    long size)
{
    int i;

    /* First find a free slot in our shared memory table */
    for (i = 0; i < MAX_MEMORY_SHARED; i++) {
        if (shared[i].linear == 0)
            break;
        }
    if (i == MAX_MEMORY_SHARED)
        return NULL;

    /* Allocate from the paged pool */
    shared[i].linear = ExAllocatePool(PagedPool, size);
    if (shared[i].linear == NULL)
        return NULL;

    /* Create an MDL to manage this allocation */
    shared[i].pMdl = IoAllocateMdl(shared[i].linear,size,FALSE,FALSE,(PIRP)NULL);

    /* Lock this allocation in memory */
    MmProbeAndLockPages(shared[i].pMdl,KernelMode,IoModifyAccess);

    /* Modify bits to grant user access */
    _PM_adjustPageTables((ulong)shared[i].linear, size, true, true);
    return (void*)shared[i].linear;
}

/****************************************************************************
REMARKS:
Free a block of shared memory
****************************************************************************/
void PMAPI PM_freeShared(
    void *p)
{
    int i;

    /* Find the shared memory block in our table and free it */
    for (i = 0; i < MAX_MEMORY_SHARED; i++) {
        if (shared[i].linear == p) {
            /* Unlock what we locked */
            MmUnlockPages(shared[i].pMdl);

            /* Free our MDL */
            IoFreeMdl(shared[i].pMdl);

            /* Free our memory */
            ExFreePool(shared[i].linear);

            /* Flag that this entry is available */
            shared[i].linear = 0;
            break;
            }
        }
}

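/****************************************************************************
EXAMPLE:
Illustrative sketch only (not part of the original source): typical pairing
of PM_mallocShared and PM_freeShared for a driver-global state block. The
DriverState structure and 'state' variable are hypothetical.

    typedef struct { ulong flags; ulong refCount; } DriverState;

    DriverState *state = PM_mallocShared(sizeof(DriverState));
    if (state != NULL) {
        state->flags = 0;
        state->refCount = 1;
        ... use the block from any context, including interrupt time ...
        PM_freeShared(state);
        }
****************************************************************************/
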
/****************************************************************************
REMARKS:
Map a physical address to a linear address in the caller's process.
****************************************************************************/
void * PMAPI PM_mapPhysicalAddr(
    ulong base,
    ulong limit,
    ibool isCached)
{
    ulong   linear,length = limit+1;
    int     i;

    /* Search the table of existing mappings to see if we have already */
    /* mapped a region of memory that will serve this purpose. */
    for (i = 0; i < numMappings; i++) {
        if (maps[i].physical == base && maps[i].length == length && maps[i].isCached == isCached) {
            _PM_adjustPageTables((ulong)maps[i].linear, maps[i].length, true, isCached);
            return (void*)maps[i].linear;
            }
        }
    if (numMappings == MAX_MEMORY_MAPPINGS)
        return NULL;

    /* We did not find a previously mapped memory region, so map it in. */
    /* MmMapIoSpace returns NULL on failure, so check for a zero address. */
    if ((linear = _PM_mapPhysicalToLinear(base,limit,isCached)) == 0)
        return NULL;
    maps[numMappings].physical = base;
    maps[numMappings].length = length;
    maps[numMappings].linear = linear;
    maps[numMappings].isCached = isCached;
    numMappings++;

    /* Grant user access to this I/O space */
    _PM_adjustPageTables((ulong)linear, length, true, isCached);
    return (void*)linear;
}

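/****************************************************************************
EXAMPLE:
Illustrative sketch only (not part of the original source): mapping the
legacy VGA frame buffer (physical 0xA0000, 64KB) uncached so it can be
written through a linear pointer. The 'videoMem' variable is hypothetical.

    uchar *videoMem = PM_mapPhysicalAddr(0xA0000, 0xFFFF, false);
    if (videoMem != NULL)
        videoMem[0] = 0x0F;
    PM_freePhysicalAddr(videoMem, 0xFFFF);

Because mappings are cached internally, a later call with the same base,
limit and caching attribute returns the same linear address.
****************************************************************************/
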
/****************************************************************************
REMARKS:
Free a physical address mapping allocated by PM_mapPhysicalAddr.
****************************************************************************/
void PMAPI PM_freePhysicalAddr(
    void *ptr,
    ulong limit)
{
    /* We don't free the memory mappings in here because we cache all */
    /* the memory mappings we create in the system for later use. */
}

/****************************************************************************
REMARKS:
Called when the device driver unloads to free all the page table mappings!
****************************************************************************/
void PMAPI _PM_freeMemoryMappings(void)
{
    int i;

    for (i = 0; i < numMappings; i++)
        MmUnmapIoSpace((void *)maps[i].linear,maps[i].length);
}

/****************************************************************************
REMARKS:
Find the physical address of a linear memory address in the current process.
****************************************************************************/
ulong PMAPI PM_getPhysicalAddr(
    void *p)
{
    PHYSICAL_ADDRESS paOurAddress;

    paOurAddress = MmGetPhysicalAddress(p);
    return paOurAddress.LowPart;
}

/****************************************************************************
REMARKS:
Find the physical addresses for a range of linear memory in the current
process, returning one physical address per 4KB page in physAddress.
****************************************************************************/
ibool PMAPI PM_getPhysicalAddrRange(
    void *p,
    ulong length,
    ulong *physAddress)
{
    int     i;
    ulong   linear = (ulong)p & ~0xFFF;

    for (i = (length + 0xFFF) >> 12; i > 0; i--) {
        if ((*physAddress++ = PM_getPhysicalAddr((void*)linear)) == 0xFFFFFFFF)
            return false;
        linear += 4096;
        }
    return true;
}

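/****************************************************************************
EXAMPLE:
Illustrative sketch only (not part of the original source): collecting the
physical address of every 4KB page backing a three page locked, contiguous
allocation. The 'buf' and 'physPages' names are hypothetical.

    ulong physPages[3];
    void  *buf = PM_allocLockedMem(3 * PAGE_SIZE, &physPages[0], true, false);

    if (buf != NULL && PM_getPhysicalAddrRange(buf, 3 * PAGE_SIZE, physPages))
        ... program physPages[0..2] into a busmaster capable device ...

On success physPages[] holds one physical address per 4KB page of the buffer.
****************************************************************************/
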
/****************************************************************************
REMARKS:
Allocates a block of locked physical memory.
****************************************************************************/
void * PMAPI PM_allocLockedMem(
    uint size,
    ulong *physAddr,
    ibool contiguous,
    ibool below16M)
{
    int                 i;
    PHYSICAL_ADDRESS    paOurAddress;

    /* First find a free slot in our locked memory table */
    for (i = 0; i < MAX_MEMORY_LOCKED; i++) {
        if (locked[i].linear == 0)
            break;
        }
    if (i == MAX_MEMORY_LOCKED)
        return NULL;

    /* HighestAcceptableAddress - Specifies the highest valid physical address */
    /* the driver can use. For example, if a device can only reference physical */
    /* memory in the lower 16MB, this value would be set to 0x00000000FFFFFF. */
    paOurAddress.HighPart = 0;
    if (below16M)
        paOurAddress.LowPart = 0x00FFFFFF;
    else
        paOurAddress.LowPart = 0xFFFFFFFF;

    if (contiguous) {
        /* Allocate from the non-paged pool (unfortunately 4MB pages) */
        locked[i].linear = MmAllocateContiguousMemory(size, paOurAddress);
        if (!locked[i].linear)
            return NULL;

        /* Flag no MDL */
        locked[i].pMdl = NULL;

        /* Return the physical address of the block to the caller */
        *physAddr = PM_getPhysicalAddr(locked[i].linear);

        /* Map the physical address for the memory so we can manage */
        /* the page tables in 4KB chunks mapped into user space. */

        /* TODO: Map this with the physical address to the linear address */
        locked[i].mmIoMapped = locked[i].linear;

        /* Modify bits to grant user access, flag not cached */
        _PM_adjustPageTables((ulong)locked[i].mmIoMapped, size, true, false);
        return (void*)locked[i].mmIoMapped;
        }
    else {
        /* Allocate from the paged pool */
        locked[i].linear = ExAllocatePool(PagedPool, size);
        if (!locked[i].linear)
            return NULL;

        /* Create an MDL to manage this allocation */
        locked[i].pMdl = IoAllocateMdl(locked[i].linear,size,FALSE,FALSE,(PIRP)NULL);

        /* Lock this allocation in memory */
        MmProbeAndLockPages(locked[i].pMdl,KernelMode,IoModifyAccess);

        /* Return the physical address of the first page to the caller */
        *physAddr = PM_getPhysicalAddr(locked[i].linear);

        /* Modify bits to grant user access, flag not cached */
        _PM_adjustPageTables((ulong)locked[i].linear, size, true, false);
        return (void*)locked[i].linear;
        }
}

/****************************************************************************
REMARKS:
Frees a block of locked physical memory.
****************************************************************************/
void PMAPI PM_freeLockedMem(
    void *p,
    uint size,
    ibool contiguous)
{
    int i;

    /* Find the locked memory block in our table and free it */
    for (i = 0; i < MAX_MEMORY_LOCKED; i++) {
        if (locked[i].linear == p) {
            /* An MDL indicates that we used the paged pool and locked it, */
            /* so now we have to unlock the pages, free the MDL, and free */
            /* the paged pool allocation. */
            if (locked[i].pMdl) {
                /* Unlock what we locked and free the MDL */
                MmUnlockPages(locked[i].pMdl);
                IoFreeMdl(locked[i].pMdl);
                ExFreePool(locked[i].linear);
                }
            else {
                /* TODO: Free the mmIoMap mapping for the memory! */

                /* Free the non-paged pool */
                MmFreeContiguousMemory(locked[i].linear);
                }

            /* Flag that this entry is available */
            locked[i].linear = 0;
            break;
            }
        }
}

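/****************************************************************************
EXAMPLE:
Illustrative sketch only (not part of the original source): allocating a
contiguous 64KB DMA buffer below 16MB for a legacy busmaster device and
releasing it again. The 'dmaBuf' and 'dmaPhys' names are hypothetical.

    ulong dmaPhys;
    void  *dmaBuf = PM_allocLockedMem(65536, &dmaPhys, true, true);

    if (dmaBuf != NULL) {
        ... program dmaPhys into the device and perform the transfer ...
        PM_freeLockedMem(dmaBuf, 65536, true);
        }
****************************************************************************/
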
/****************************************************************************
REMARKS:
Allocates a page aligned and page sized block of memory
****************************************************************************/
void * PMAPI PM_allocPage(
    ibool locked)
{
    /* Allocate the memory from the non-paged pool if we want the memory */
    /* to be locked. */
    return ExAllocatePool(
        locked ? NonPagedPoolCacheAligned : PagedPoolCacheAligned,
        PAGE_SIZE);
}

/****************************************************************************
REMARKS:
Free a page aligned and page sized block of memory
****************************************************************************/
void PMAPI PM_freePage(
    void *p)
{
    if (p) ExFreePool(p);
}

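/****************************************************************************
EXAMPLE:
Illustrative sketch only (not part of the original source): grabbing a single
locked page for a small table that must never be paged out. The 'page'
variable is hypothetical.

    void *page = PM_allocPage(true);
    if (page != NULL) {
        memset(page, 0, PAGE_SIZE);
        ... fill in and use the table ...
        PM_freePage(page);
        }
****************************************************************************/
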
/****************************************************************************
REMARKS:
Lock linear memory so it won't be paged.
****************************************************************************/
int PMAPI PM_lockDataPages(
    void *p,
    uint len,
    PM_lockHandle *lh)
{
    MDL *pMdl;

    /* Create an MDL to manage this region */
    if ((pMdl = IoAllocateMdl(p,len,FALSE,FALSE,(PIRP)NULL)) == NULL)
        return false;

    /* Lock this region in memory */
    MmProbeAndLockPages(pMdl,KernelMode,IoModifyAccess);
    *((PMDL*)(&lh->h)) = pMdl;
    return true;
}

/****************************************************************************
REMARKS:
Unlock linear memory so it can be paged out again.
****************************************************************************/
int PMAPI PM_unlockDataPages(
    void *p,
    uint len,
    PM_lockHandle *lh)
{
    if (p && lh) {
        /* Unlock what we locked */
        MDL *pMdl = *((PMDL*)(&lh->h));
        MmUnlockPages(pMdl);
        IoFreeMdl(pMdl);
        }
    return true;
}

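/****************************************************************************
EXAMPLE:
Illustrative sketch only (not part of the original source): locking a data
buffer that may be touched from an interrupt-time context, then unlocking it
when it no longer needs to stay resident. The 'ioBuffer' and 'lock' names
are hypothetical.

    static uchar ioBuffer[4096];
    PM_lockHandle lock;

    if (PM_lockDataPages(ioBuffer, sizeof(ioBuffer), &lock)) {
        ... the buffer is now guaranteed to stay resident ...
        PM_unlockDataPages(ioBuffer, sizeof(ioBuffer), &lock);
        }

PM_lockCodePages/PM_unlockCodePages below work the same way for functions.
****************************************************************************/
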
/****************************************************************************
REMARKS:
Lock linear memory so it won't be paged.
****************************************************************************/
int PMAPI PM_lockCodePages(
    void (*p)(),
    uint len,
    PM_lockHandle *lh)
{
    return PM_lockDataPages((void*)p,len,lh);
}

/****************************************************************************
REMARKS:
Unlock linear memory so it can be paged out again.
****************************************************************************/
int PMAPI PM_unlockCodePages(
    void (*p)(),
    uint len,
    PM_lockHandle *lh)
{
    return PM_unlockDataPages((void*)p,len,lh);
}