|
| 1 | +// This package offers the MMap type that manipulates a memory mapped file or |
| 2 | +// device. |
| 3 | +// |
| 4 | +// IMPORTANT NOTE (1): The MMap type is backed by an unsafe memory region, |
| 5 | +// which is not covered by the normal rules of Go's memory management. If a |
| 6 | +// slice is taken out of it, and then the memory is explicitly unmapped through |
| 7 | +// one of the available methods, both the MMap value itself and the slice |
| 8 | +// obtained will now silently point to invalid memory. Attempting to access |
| 9 | +// data in them will crash the application. |
| 10 | + |
//go:build windows
// +build windows
| 12 | + |
| 13 | +package gommap |
| 14 | + |
| 15 | +import ( |
| 16 | + "errors" |
| 17 | + "os" |
| 18 | + "reflect" |
| 19 | + "syscall" |
| 20 | + "unsafe" |
| 21 | +) |
| 22 | + |
// The MMap type represents a memory mapped file or device. The slice offers
// direct access to the memory mapped content.
//
// IMPORTANT: Please see note in the package documentation regarding the way
// in which this type behaves.
type MMap []byte
| 29 | + |
// mmapAttrs remembers, keyed by the base address of each mapping, the
// parameters the mapping was created with, so that Protect can unmap the
// region and map it again with different protection flags (Windows has no
// direct mprotect equivalent for an existing view).
//
// NOTE(review): access to this map is not synchronized; concurrent calls to
// Map/MapRegion/Protect would race on it — confirm whether callers are
// expected to serialize these operations.
var mmapAttrs = map[uintptr]*struct {
	fd     uintptr
	offset int64
	length int64
	prot   ProtFlags
	flags  MapFlags
}{}
| 38 | + |
| 39 | +// GetFileSize gets the file length from its fd |
| 40 | +func GetFileSize(fd uintptr) (int64, error) { |
| 41 | + fh := syscall.Handle(fd) |
| 42 | + fsize, err := syscall.Seek(syscall.Handle(fh), 0, 2) |
| 43 | + syscall.Seek(fh, 0, 0) |
| 44 | + return fsize, err |
| 45 | +} |
| 46 | + |
| 47 | +// Map creates a new mapping in the virtual address space of the calling process. |
| 48 | +// This function will attempt to map the entire file by using the fstat system |
| 49 | +// call with the provided file descriptor to discover its length. |
| 50 | +func Map(fd uintptr, prot ProtFlags, flags MapFlags) (MMap, error) { |
| 51 | + return MapRegion(fd, 0, -1, prot, flags) |
| 52 | +} |
| 53 | + |
| 54 | +// MapRegion creates a new mapping in the virtual address space of the calling |
| 55 | +// process, using the specified region of the provided file or device. If -1 is |
| 56 | +// provided as length, this function will attempt to map until the end of the |
| 57 | +// provided file descriptor by using the fstat system call to discover its |
| 58 | +// length. |
| 59 | +func MapRegion(fd uintptr, offset, length int64, prot ProtFlags, flags MapFlags) (MMap, error) { |
| 60 | + if offset%int64(os.Getpagesize()) != 0 { |
| 61 | + return nil, errors.New("offset parameter must be a multiple of the system's page size") |
| 62 | + } |
| 63 | + if length == -1 { |
| 64 | + length, _ = GetFileSize(fd) |
| 65 | + } |
| 66 | + /* on windows, use PROT_COPY to do the same thing as linux MAP_PRIVATE flag do */ |
| 67 | + if flags == MAP_PRIVATE { |
| 68 | + prot = PROT_COPY |
| 69 | + } |
| 70 | + // return mmap(length, uintptr(prot), uintptr(flags), fd, offset) |
| 71 | + |
| 72 | + /*******************************/ |
| 73 | + m, e := mmap(length, uintptr(prot), uintptr(flags), fd, offset) |
| 74 | + dh := (*reflect.SliceHeader)(unsafe.Pointer(&m)) |
| 75 | + mmapAttrs[dh.Data] = &struct { |
| 76 | + fd uintptr |
| 77 | + offset int64 |
| 78 | + length int64 |
| 79 | + prot ProtFlags |
| 80 | + flags MapFlags |
| 81 | + }{fd, offset, length, prot, flags} |
| 82 | + return m, e |
| 83 | +} |
| 84 | + |
// header returns the slice header backing the mmap slice, exposing the
// mapping's base address (Data) and length (Len) to the unmap/flush/lock
// helpers.
//
// NOTE(review): reflect.SliceHeader is deprecated as of Go 1.20 in favor of
// unsafe.SliceData/unsafe.Slice — consider migrating when the toolchain
// baseline allows.
func (mmap *MMap) header() *reflect.SliceHeader {
	return (*reflect.SliceHeader)(unsafe.Pointer(mmap))
}
| 88 | + |
| 89 | +// UnsafeUnmap deletes the memory mapped region defined by the mmap slice. This |
| 90 | +// will also flush any remaining changes, if necessary. Using mmap or any |
| 91 | +// other slices based on it after this method has been called will crash the |
| 92 | +// application. |
| 93 | +func (mmap MMap) UnsafeUnmap() error { |
| 94 | + dh := mmap.header() |
| 95 | + return unmap(dh.Data, uintptr(dh.Len)) |
| 96 | +} |
| 97 | + |
// Sync flushes changes made to the region determined by the mmap slice
// back to the device. Without calling this method, there are no guarantees
// that changes will be flushed back before the region is unmapped. The
// flags parameter specifies whether flushing should be done synchronously
// (before the method returns) with MS_SYNC, or asynchronously (flushing is just
// scheduled) with MS_ASYNC.
//
// NOTE(review): flags is accepted for API compatibility but is not passed
// down to flush here, so MS_SYNC/MS_ASYNC are effectively ignored on this
// platform — confirm whether the underlying implementation can honor them.
func (mmap MMap) Sync(flags SyncFlags) error {
	dh := mmap.header()
	return flush(dh.Data, uintptr(dh.Len))
}
| 108 | + |
| 109 | +// // Advise advises the kernel about how to handle the mapped memory |
| 110 | +// // region in terms of input/output paging within the memory region |
| 111 | +// // defined by the mmap slice. |
| 112 | +// func (mmap MMap) Advise(advice AdviseFlags) error { |
| 113 | +// // rh := *(*reflect.SliceHeader)(unsafe.Pointer(&mmap)) |
| 114 | +// // _, _, err := syscall.Syscall(syscall.SYS_MADVISE, uintptr(rh.Data), uintptr(rh.Len), uintptr(advice)) |
| 115 | +// // if err != 0 { |
| 116 | +// // return err |
| 117 | +// // } |
| 118 | +// // return nil |
| 119 | +// } |
| 120 | + |
| 121 | +// Protect changes the protection flags for the memory mapped region |
| 122 | +// defined by the mmap slice. |
| 123 | +// We use unmap & map again to implement this on windows. So can only change the protect flags on the whole |
| 124 | +func (mmap *MMap) Protect(prot ProtFlags) (err error) { |
| 125 | + dh := mmap.header() |
| 126 | + var m MMap |
| 127 | + if err = mmap.UnsafeUnmap(); err == nil { |
| 128 | + fd, offset, length, flags := mmapAttrs[dh.Data].fd, mmapAttrs[dh.Data].offset, mmapAttrs[dh.Data].length, mmapAttrs[dh.Data].flags |
| 129 | + mmapAttrs[dh.Data] = nil |
| 130 | + if m, err = MapRegion(fd, offset, length, prot, flags); err == nil { |
| 131 | + mmap = &m |
| 132 | + } |
| 133 | + } |
| 134 | + return |
| 135 | +} |
| 136 | + |
| 137 | +// Lock locks the mapped region defined by the mmap slice, |
| 138 | +// preventing it from being swapped out. |
| 139 | +func (mmap MMap) Lock() error { |
| 140 | + dh := mmap.header() |
| 141 | + return lock(dh.Data, uintptr(dh.Len)) |
| 142 | +} |
| 143 | + |
| 144 | +// Unlock unlocks the mapped region defined by the mmap slice, |
| 145 | +// allowing it to swap out again. |
| 146 | +func (mmap MMap) Unlock() error { |
| 147 | + dh := mmap.header() |
| 148 | + return unlock(dh.Data, uintptr(dh.Len)) |
| 149 | +} |
| 150 | + |
| 151 | +// // IsResident returns a slice of booleans informing whether the respective |
| 152 | +// // memory page in mmap was mapped at the time the call was made. |
| 153 | +// func (mmap MMap) IsResident() ([]bool, error) { |
| 154 | +// pageSize := os.Getpagesize() |
| 155 | +// result := make([]bool, (len(mmap)+pageSize-1)/pageSize) |
| 156 | +// rh := *(*reflect.SliceHeader)(unsafe.Pointer(&mmap)) |
| 157 | +// resulth := *(*reflect.SliceHeader)(unsafe.Pointer(&result)) |
| 158 | +// _, _, err := syscall.Syscall(syscall.SYS_MINCORE, uintptr(rh.Data), uintptr(rh.Len), uintptr(resulth.Data)) |
| 159 | +// for i := range result { |
| 160 | +// *(*uint8)(unsafe.Pointer(&result[i])) &= 1 |
| 161 | +// } |
| 162 | +// if err != 0 { |
| 163 | +// return nil, err |
| 164 | +// } |
| 165 | +// return result, nil |
| 166 | +// } |
0 commit comments