Commit | Line | Data |
---|---|---|
f7a2be83 | 1 | /* |
0235b0db | 2 | * SPDX-License-Identifier: MIT |
f7a2be83 | 3 | * |
0235b0db | 4 | * Copyright 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
f7a2be83 MD |
5 | */ |
6 | ||
0235b0db MJ |
7 | #ifndef _BABELTRACE_MMAP_ALIGN_H |
8 | #define _BABELTRACE_MMAP_ALIGN_H | |
9 | ||
578e048b | 10 | #include "common/align.h" |
f7a2be83 | 11 | #include <stdlib.h> |
544d0515 | 12 | #include <stdint.h> |
578e048b MJ |
13 | #include "compat/mman.h" |
14 | #include "common/common.h" | |
f7a2be83 MD |
15 | |
16 | /* | |
17 | * This header implements a wrapper over mmap (mmap_align) that memory | |
18 | * maps a file region that is not necessarily multiple of the page size. | |
19 | * It returns a structure (instead of a pointer) that contains the mmap | |
20 | * pointer (page-aligned) and a pointer to the offset requested within | |
21 | * that page. Note: in the current implementation, the "addr" parameter | |
22 | * cannot be forced, so we allocate at an address chosen by the OS. | |
23 | */ | |
24 | ||
/*
 * Descriptor returned by mmap_align(): records both the real,
 * page-aligned mapping (what must eventually be passed to munmap) and
 * the "virtual" view — the exact byte range the caller requested,
 * which may start in the middle of the first mapped page.
 */
struct mmap_align {
	void *page_aligned_addr;	/* mmap address, aligned to floor */
	size_t page_aligned_length;	/* mmap length, containing range */

	void *addr;			/* virtual mmap address */
	size_t length;			/* virtual mmap length */
};
32 | ||
/*
 * Round `offset` down to the platform's mmap offset alignment
 * granularity so the result is a legal offset for bt_mmap().
 */
static inline
off_t get_page_aligned_offset(off_t offset, int log_level)
{
	off_t align_size = bt_mmap_get_offset_align_size(log_level);

	return ALIGN_FLOOR(offset, align_size);
}
8f76831a | 38 | |
f7a2be83 MD |
39 | static inline |
40 | struct mmap_align *mmap_align(size_t length, int prot, | |
86d8b7b8 | 41 | int flags, int fd, off_t offset, int log_level) |
f7a2be83 MD |
42 | { |
43 | struct mmap_align *mma; | |
44 | off_t page_aligned_offset; /* mmap offset, aligned to floor */ | |
108e5a1e MJ |
45 | size_t page_size; |
46 | ||
86d8b7b8 | 47 | page_size = bt_common_get_page_size(log_level); |
f7a2be83 MD |
48 | |
49 | mma = malloc(sizeof(*mma)); | |
50 | if (!mma) | |
51 | return MAP_FAILED; | |
52 | mma->length = length; | |
3b16a19b | 53 | page_aligned_offset = get_page_aligned_offset(offset, log_level); |
f7a2be83 MD |
54 | /* |
55 | * Page aligned length needs to contain the requested range. | |
56 | * E.g., for a small range that fits within a single page, we might | |
57 | * require a 2 pages page_aligned_length if the range crosses a page | |
58 | * boundary. | |
59 | */ | |
108e5a1e | 60 | mma->page_aligned_length = ALIGN(length + offset - page_aligned_offset, page_size); |
04394229 | 61 | mma->page_aligned_addr = bt_mmap(NULL, mma->page_aligned_length, |
95c324a4 | 62 | prot, flags, fd, page_aligned_offset, log_level); |
8f76831a | 63 | if (mma->page_aligned_addr == MAP_FAILED) { |
f7a2be83 MD |
64 | free(mma); |
65 | return MAP_FAILED; | |
66 | } | |
544d0515 | 67 | mma->addr = ((uint8_t *) mma->page_aligned_addr) + (offset - page_aligned_offset); |
f7a2be83 MD |
68 | return mma; |
69 | } | |
70 | ||
71 | static inline | |
72 | int munmap_align(struct mmap_align *mma) | |
73 | { | |
74 | void *page_aligned_addr; | |
75 | size_t page_aligned_length; | |
76 | ||
77 | page_aligned_addr = mma->page_aligned_addr; | |
78 | page_aligned_length = mma->page_aligned_length; | |
79 | free(mma); | |
04394229 | 80 | return bt_munmap(page_aligned_addr, page_aligned_length); |
f7a2be83 MD |
81 | } |
82 | ||
83 | static inline | |
84 | void *mmap_align_addr(struct mmap_align *mma) | |
85 | { | |
86 | return mma->addr; | |
87 | } | |
88 | ||
89 | /* | |
90 | * Helper for special-cases, normally unused. | |
91 | */ | |
92 | static inline | |
93 | void mmap_align_set_addr(struct mmap_align *mma, void *addr) | |
94 | { | |
95 | mma->addr = addr; | |
96 | } | |
97 | ||
98 | #endif /* _BABELTRACE_MMAP_ALIGN_H */ |