wvm.cpp
/* Copyright (c) 2011 Wildfire Games
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * virtual memory interface. supersedes POSIX mmap; provides support for
 * large pages, autocommit, and specifying protection flags during allocation.
 */

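// Example (illustrative sketch, not from the original source): minimal use of
// this interface via the simple allocate/free path. The size, the MiB constant
// and the enumerator spellings are assumptions; see lib/sysdep/vm.h for the
// authoritative declarations.
//
//   void* p = vm::Allocate(8*MiB, vm::kDefault, PROT_READ|PROT_WRITE);
//   if(p)
//   {
//       // ... use the memory; large pages are chosen heuristically ...
//       vm::Free(p, 8*MiB);
//   }
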
#include "precompiled.h"
#include "lib/sysdep/vm.h"

#include <excpt.h>

#include "lib/timer.h"
#include "lib/bits.h" // round_down
#include "lib/alignment.h" // CACHE_ALIGNED
#include "lib/module_init.h"
#include "lib/sysdep/cpu.h" // cpu_AtomicAdd
#include "lib/sysdep/numa.h"
#include "lib/sysdep/arch/x86_x64/x86_x64.h" // x86_x64::ApicId
#include "lib/sysdep/arch/x86_x64/apic.h" // ProcessorFromApicId

//-----------------------------------------------------------------------------
// functions not supported by 32-bit Windows XP

static WUTIL_FUNC(pGetCurrentProcessorNumber, DWORD, (VOID));
static WUTIL_FUNC(pGetNumaProcessorNode, BOOL, (UCHAR, PUCHAR));
static WUTIL_FUNC(pVirtualAllocExNuma, LPVOID, (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD));

static DWORD WINAPI EmulateGetCurrentProcessorNumber(VOID)
{
    const ApicId apicId = GetApicId();
    const DWORD processor = (DWORD)ProcessorFromApicId(apicId);
    ASSERT(processor < os_cpu_MaxProcessors);
    return processor;
}

static BOOL WINAPI EmulateGetNumaProcessorNode(UCHAR UNUSED(processor), PUCHAR node)
{
    // given that the system doesn't support GetNumaProcessorNode,
    // it will also lack VirtualAllocExNuma, so the node value we assign
    // is ignored by EmulateVirtualAllocExNuma.
    *node = 0;
    return TRUE;
}

static LPVOID WINAPI EmulateVirtualAllocExNuma(HANDLE UNUSED(hProcess), LPVOID p, SIZE_T size, DWORD allocationType, DWORD protect, DWORD UNUSED(node))
{
    return VirtualAlloc(p, size, allocationType, protect);
}


static Status wvm_Init()
{
    WUTIL_IMPORT_KERNEL32(GetCurrentProcessorNumber, pGetCurrentProcessorNumber);
    WUTIL_IMPORT_KERNEL32(GetNumaProcessorNode, pGetNumaProcessorNode);
    WUTIL_IMPORT_KERNEL32(VirtualAllocExNuma, pVirtualAllocExNuma);

    if(!pGetCurrentProcessorNumber)
        pGetCurrentProcessorNumber = &EmulateGetCurrentProcessorNumber;
    if(!pGetNumaProcessorNode)
        pGetNumaProcessorNode = &EmulateGetNumaProcessorNode;
    if(!pVirtualAllocExNuma)
        pVirtualAllocExNuma = &EmulateVirtualAllocExNuma;

    return INFO::OK;
}


namespace vm {


//-----------------------------------------------------------------------------
// per-processor statistics

// (alignment avoids false sharing)
CACHE_ALIGNED(struct Statistics) // POD
{
    // thread-safe (required due to concurrent commits)
    void NotifyLargePageCommit()
    {
        cpu_AtomicAdd(&largePageCommits, +1);
    }

    void NotifySmallPageCommit()
    {
        cpu_AtomicAdd(&smallPageCommits, +1);
    }

    intptr_t largePageCommits;
    intptr_t smallPageCommits;
};
static CACHE_ALIGNED(Statistics) statistics[os_cpu_MaxProcessors];

void DumpStatistics()
{
    ENSURE(IsAligned(&statistics[0], cacheLineSize));
    ENSURE(IsAligned(&statistics[1], cacheLineSize));

    size_t smallPageCommits = 0;
    size_t largePageCommits = 0;
    uintptr_t processorsWithNoCommits = 0;
    for(size_t processor = 0; processor < os_cpu_NumProcessors(); processor++)
    {
        const Statistics& s = statistics[processor];
        if(s.smallPageCommits == 0 && s.largePageCommits == 0)
            processorsWithNoCommits |= Bit<uintptr_t>(processor);
        smallPageCommits += s.smallPageCommits;
        largePageCommits += s.largePageCommits;
    }

    const size_t totalCommits = smallPageCommits+largePageCommits;
    if(totalCommits == 0) // this module wasn't used => don't print debug output
        return;

    const size_t largePageRatio = totalCommits? largePageCommits*100/totalCommits : 0;
    // (cast to u64 and use %lld/%llx - size_t/uintptr_t are 64-bit on Win64,
    // so %d/%x would be incorrect; matches the cast style used below)
    debug_printf(L"%lld commits (%lld, i.e. %lld%% of them via large pages)\n", (u64)totalCommits, (u64)largePageCommits, (u64)largePageRatio);
    if(processorsWithNoCommits != 0)
        debug_printf(L" processors with no commits: %llx\n", (u64)processorsWithNoCommits);

    if(numa_NumNodes() > 1)
        debug_printf(L"NUMA factor: %.2f\n", numa_Factor());
}


//-----------------------------------------------------------------------------
// allocator with large-page and NUMA support

static bool largePageAllocationTookTooLong = false;

static bool ShouldUseLargePages(size_t allocationSize, DWORD allocationType, PageType pageType)
{
    // don't even check for large page support.
    if(pageType == kSmall)
        return false;

    // can't use large pages when reserving - VirtualAlloc would fail with
    // ERROR_INVALID_PARAMETER.
    if((allocationType & MEM_COMMIT) == 0)
        return false;

    // OS lacks support for large pages.
    if(os_cpu_LargePageSize() == 0)
        return false;

    // large pages are available and application wants them used.
    if(pageType == kLarge)
        return true;

    // default: use a heuristic.
    {
        // internal fragmentation would be excessive.
        if(allocationSize <= largePageSize/2)
            return false;

        // a previous attempt already took too long.
        if(largePageAllocationTookTooLong)
            return false;

        // pre-Vista Windows OSes attempt to cope with page fragmentation by
        // trimming the working set of all processes, thus swapping them out,
        // and waiting for contiguous regions to appear. this is terribly
        // slow (multiple seconds), hence the following heuristic:
        if(wversion_Number() < WVERSION_VISTA)
        {
            // if there's not plenty of free memory, then memory is surely
            // already fragmented.
            if(os_cpu_MemoryAvailable() < 2000) // 2 GB
                return false;
        }
    }

    return true;
}


// used for reserving address space, committing pages, or both.
static void* AllocateLargeOrSmallPages(uintptr_t address, size_t size, DWORD allocationType, PageType pageType = kDefault, int prot = PROT_READ|PROT_WRITE)
{
    const HANDLE hProcess = GetCurrentProcess();
    const DWORD protect = MemoryProtectionFromPosix(prot);

    UCHAR node;
    const DWORD processor = pGetCurrentProcessorNumber();
    WARN_IF_FALSE(pGetNumaProcessorNode((UCHAR)processor, &node));

    if(ShouldUseLargePages(size, allocationType, pageType))
    {
        // MEM_LARGE_PAGES requires aligned addresses and sizes
        const size_t largePageSize = os_cpu_LargePageSize();
        const uintptr_t alignedAddress = round_down(address, largePageSize);
        const size_t alignedSize = round_up(size+largePageSize-1, largePageSize);
        // note: this call can take SECONDS, which is why several checks are
        // undertaken before we even try. these aren't authoritative, so we
        // at least prevent future attempts if it takes too long.
        const double startTime = timer_Time(); COMPILER_FENCE;
        void* largePages = pVirtualAllocExNuma(hProcess, LPVOID(alignedAddress), alignedSize, allocationType|MEM_LARGE_PAGES, protect, node);
        const double elapsedTime = timer_Time() - startTime; COMPILER_FENCE;
        if(elapsedTime > 0.5)
            largePageAllocationTookTooLong = true; // avoid large pages next time
        if(largePages)
        {
            if((allocationType & MEM_COMMIT) != 0)
                statistics[processor].NotifyLargePageCommit();
            return largePages;
        }
    }

    // try (again) with regular pages
    void* smallPages = pVirtualAllocExNuma(hProcess, LPVOID(address), size, allocationType, protect, node);
    if(smallPages)
    {
        if((allocationType & MEM_COMMIT) != 0)
            statistics[processor].NotifySmallPageCommit();
        return smallPages;
    }
    else
    {
        MEMORY_BASIC_INFORMATION mbi = {0};
        (void)VirtualQuery(LPCVOID(address), &mbi, sizeof(mbi)); // return value is #bytes written in mbi
        debug_printf(L"Allocation failed: base=%p allocBase=%p allocProt=%d size=%lld state=%d prot=%d type=%d\n", mbi.BaseAddress, mbi.AllocationBase, mbi.AllocationProtect, (u64)mbi.RegionSize, mbi.State, mbi.Protect, mbi.Type);
    }

    return 0;
}

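// Worked example of the alignment arithmetic above (illustrative, assuming a
// 2 MiB large-page size): for address = 0x00301000 and size = 0x00250000,
// round_down gives alignedAddress = 0x00200000, and
// round_up(size+largePageSize-1, largePageSize) gives alignedSize = 0x00600000.
// The region [0x00200000, 0x00800000) thus covers [address, address+size)
// even though the base address was rounded down.
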

//-----------------------------------------------------------------------------
// address space reservation

// indicates the extent of a range of address space,
// and the parameters for committing large/small pages in it.
//
// this bookkeeping information increases the safety of on-demand commits,
// enables different parameters for separate allocations, and allows
// variable alignment because it retains the original base address.
// (storing this information within the allocated memory would
// require mapping an additional page and may waste an entire
// large page if the base address happens to be aligned already.)
CACHE_ALIGNED(struct AddressRangeDescriptor) // POD
{
    // attempt to activate this descriptor and reserve address space.
    // side effect: initializes all fields if successful.
    //
    // @param size, commitSize, pageType, prot - see ReserveAddressSpace.
    // @return INFO::SKIPPED if this descriptor is already in use,
    // INFO::OK on success, otherwise ERR::NO_MEM (after showing an
    // error message).
    Status Allocate(size_t size, size_t commitSize, PageType pageType, int prot)
    {
        // if this descriptor wasn't yet in use, mark it as busy
        // (double-checking is cheaper than cpu_CAS)
        if(base != 0 || !cpu_CAS(&base, intptr_t(0), intptr_t(this)))
            return INFO::SKIPPED;

        ENSURE(size != 0); // probably indicates a bug in caller
        ENSURE((commitSize % largePageSize) == 0 || pageType == kSmall);
        ASSERT(pageType == kLarge || pageType == kSmall || pageType == kDefault);
        ASSERT(prot == PROT_NONE || (prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) == 0);
        this->commitSize = commitSize;
        this->pageType = pageType;
        this->prot = prot;

        alignment = (pageType == kSmall)? pageSize : largePageSize;
        totalSize = round_up(size+alignment-1, alignment);

        // NB: it is meaningless to ask for large pages when reserving
        // (see ShouldUseLargePages). pageType only affects subsequent commits.
        base = (intptr_t)AllocateLargeOrSmallPages(0, totalSize, MEM_RESERVE);
        if(!base)
        {
            debug_printf(L"AllocateLargeOrSmallPages of %lld failed\n", (u64)totalSize);
            DEBUG_DISPLAY_ERROR(ErrorString());
            return ERR::NO_MEM; // NOWARN (error string is more helpful)
        }

        alignedBase = round_up(uintptr_t(base), alignment);
        alignedEnd = alignedBase + round_up(size, alignment);
        return INFO::OK;
    }

    void Free()
    {
        vm::Free((void*)base, totalSize);
        alignment = alignedBase = alignedEnd = 0;
        totalSize = 0;
        COMPILER_FENCE; // reset the fields above before the descriptor is released
        base = 0; // release descriptor for subsequent reuse
    }

    bool Contains(uintptr_t address) const
    {
        // safety check: we should never see pointers in the no-man's-land
        // between the original and rounded up base addresses.
        ENSURE(!(uintptr_t(base) <= address && address < alignedBase));

        return (alignedBase <= address && address < alignedEnd);
    }

    bool Commit(uintptr_t address)
    {
        // (safe because Allocate rounded up to alignment)
        const uintptr_t alignedAddress = round_down(address, alignment);
        ENSURE(alignedBase <= alignedAddress && alignedAddress+commitSize <= alignedEnd);
        return vm::Commit(alignedAddress, commitSize, pageType, prot);
    }

    // corresponds to the respective page size (Windows requires
    // naturally aligned addresses and sizes when committing large pages).
    // note that VirtualAlloc's alignment defaults to 64 KiB.
    uintptr_t alignment;

    uintptr_t alignedBase; // multiple of alignment
    uintptr_t alignedEnd;  // "

    // (actual requested size / allocated address is required by
    // ReleaseAddressSpace due to variable alignment.)
    volatile intptr_t base; // (type is dictated by cpu_CAS)
    size_t totalSize;

    // parameters to be relayed to vm::Commit
    size_t commitSize;
    PageType pageType;
    int prot;

//private:
    static const wchar_t* ErrorString()
    {
#if ARCH_IA32
        return L"Out of address space (64-bit OS may help)";
#elif OS_WIN
        // because early AMD64 lacked CMPXCHG16B, the Windows lock-free slist
        // must squeeze the address, ABA tag and list length (a questionable
        // design decision) into 64 bits. that leaves 39 bits for the
        // address, plus 4 implied zero bits due to 16-byte alignment.
        // [http://www.alex-ionescu.com/?p=50]
        return L"Out of address space (Windows only provides 8 TiB)";
#else
        return L"Out of address space";
#endif
    }
};

// (array size governs the max. number of extant allocations)
static AddressRangeDescriptor ranges[2*os_cpu_MaxProcessors];


static AddressRangeDescriptor* FindDescriptor(uintptr_t address)
{
    for(size_t idxRange = 0; idxRange < ARRAY_SIZE(ranges); idxRange++)
    {
        AddressRangeDescriptor& d = ranges[idxRange];
        if(d.Contains(address))
            return &d;
    }

    return 0; // not contained in any allocated ranges
}


void* ReserveAddressSpace(size_t size, size_t commitSize, PageType pageType, int prot)
{
    for(size_t idxRange = 0; idxRange < ARRAY_SIZE(ranges); idxRange++)
    {
        Status ret = ranges[idxRange].Allocate(size, commitSize, pageType, prot);
        if(ret == INFO::OK)
            return (void*)ranges[idxRange].alignedBase;
        if(ret == ERR::NO_MEM)
            return 0;
        // else: descriptor already in use, try the next one
    }

    // all descriptors are in use; ranges[] was too small
    DEBUG_WARN_ERR(ERR::LIMIT);
    return 0;
}

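// Example (illustrative sketch, not from the original source): reserving a
// range and committing part of it manually. The function name and sizes are
// assumptions; note that Allocate above requires commitSize to be a multiple
// of largePageSize unless pageType is kSmall.
//
//   static void ExampleManualCommit()
//   {
//       const size_t size = 64*MiB; // assumes MiB = 1ull << 20
//       void* p = vm::ReserveAddressSpace(size, largePageSize, vm::kDefault, PROT_READ|PROT_WRITE);
//       if(!p)
//           return;
//       // commit the first chunk before touching it
//       if(vm::Commit(uintptr_t(p), largePageSize, vm::kDefault, PROT_READ|PROT_WRITE))
//           memset(p, 0, largePageSize);
//       vm::ReleaseAddressSpace(p, size);
//   }
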

void ReleaseAddressSpace(void* p, size_t UNUSED(size))
{
    // it is customary to ignore null pointers
    if(!p)
        return;

    AddressRangeDescriptor* d = FindDescriptor(uintptr_t(p));
    if(d)
        d->Free();
    else
    {
        debug_printf(L"No AddressRangeDescriptor contains %P\n", p);
        ENSURE(0);
    }
}


//-----------------------------------------------------------------------------
// commit/decommit, allocate/free, protect

TIMER_ADD_CLIENT(tc_commit);

bool Commit(uintptr_t address, size_t size, PageType pageType, int prot)
{
    TIMER_ACCRUE_ATOMIC(tc_commit);

    return AllocateLargeOrSmallPages(address, size, MEM_COMMIT, pageType, prot) != 0;
}


bool Decommit(uintptr_t address, size_t size)
{
    return VirtualFree(LPVOID(address), size, MEM_DECOMMIT) != FALSE;
}


bool Protect(uintptr_t address, size_t size, int prot)
{
    const DWORD protect = MemoryProtectionFromPosix(prot);
    DWORD oldProtect; // required by VirtualProtect
    const BOOL ok = VirtualProtect(LPVOID(address), size, protect, &oldProtect);
    return ok != FALSE;
}


void* Allocate(size_t size, PageType pageType, int prot)
{
    return AllocateLargeOrSmallPages(0, size, MEM_RESERVE|MEM_COMMIT, pageType, prot);
}


void Free(void* p, size_t UNUSED(size))
{
    if(p) // otherwise, VirtualFree complains
    {
        const BOOL ok = VirtualFree(p, 0, MEM_RELEASE);
        WARN_IF_FALSE(ok);
    }
}


//-----------------------------------------------------------------------------
// on-demand commit

// NB: avoid using debug_printf here because OutputDebugString has been
// observed to generate vectored exceptions when running outside the IDE.
static LONG CALLBACK VectoredHandler(const PEXCEPTION_POINTERS ep)
{
    const PEXCEPTION_RECORD er = ep->ExceptionRecord;

    // we only want to handle access violations. (strictly speaking,
    // unmapped memory causes page faults, but Windows reports them
    // with EXCEPTION_ACCESS_VIOLATION.)
    if(er->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
        return EXCEPTION_CONTINUE_SEARCH;

    // NB: read exceptions are legitimate and occur when updating an
    // accumulator for the first time.

    // get the source/destination of the read/write operation that
    // failed. (NB: don't use er->ExceptionAddress - that's the
    // location of the code that encountered the fault)
    const uintptr_t address = (uintptr_t)er->ExceptionInformation[1];

    // if unknown (e.g. access violation in kernel address space or
    // violation of alignment requirements), we don't want to handle it.
    if(address == ~uintptr_t(0))
        return EXCEPTION_CONTINUE_SEARCH;

    // the address space must have been allocated by ReserveAddressSpace
    // (otherwise we wouldn't know the desired commitSize/pageType/prot).
    AddressRangeDescriptor* d = FindDescriptor(address);
    if(!d)
        return EXCEPTION_CONTINUE_SEARCH;

    // NB: the first access to a page isn't necessarily at offset 0
    // (memcpy isn't guaranteed to copy sequentially). rounding down
    // is safe and necessary - see AddressRangeDescriptor::alignment.
    const uintptr_t alignedAddress = round_down(address, d->alignment);
    bool ok = d->Commit(alignedAddress);
    if(!ok)
    {
        // (cast to void* - %p expects a pointer argument)
        debug_printf(L"VectoredHandler: Commit(0x%p) failed; address=0x%p\n", (void*)alignedAddress, (void*)address);
        ENSURE(0);
        return EXCEPTION_CONTINUE_SEARCH;
    }

    // continue at (i.e. retry) the same instruction.
    return EXCEPTION_CONTINUE_EXECUTION;
}


static PVOID handler;
static ModuleInitState initState;
static volatile intptr_t references = 0; // atomic

static Status InitHandler()
{
    ENSURE(handler == 0);
    handler = AddVectoredExceptionHandler(TRUE, VectoredHandler);
    ENSURE(handler != 0);
    return INFO::OK;
}

static void ShutdownHandler()
{
    ENSURE(handler != 0);
    const ULONG ret = RemoveVectoredExceptionHandler(handler);
    ENSURE(ret != 0);
    handler = 0;
}

void BeginOnDemandCommits()
{
    (void)ModuleInit(&initState, InitHandler);
    cpu_AtomicAdd(&references, +1);
}

void EndOnDemandCommits()
{
    if(cpu_AtomicAdd(&references, -1) == 1)
        ModuleShutdown(&initState, ShutdownHandler);
}

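// Example (illustrative sketch, not from the original source): pairing the
// reference-counted handler installation above with a reserved range, so that
// the first fault on each aligned block commits it automatically. Variable
// names are assumptions; u8 is the codebase's byte typedef.
//
//   vm::BeginOnDemandCommits(); // installs VectoredHandler on first call
//   void* p = vm::ReserveAddressSpace(size, commitSize, vm::kDefault, PROT_READ|PROT_WRITE);
//   ((u8*)p)[0] = 1; // faults; VectoredHandler commits commitSize bytes, then retries
//   // ...
//   vm::ReleaseAddressSpace(p, size);
//   vm::EndOnDemandCommits(); // removes the handler when the count reaches zero
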
} // namespace vm