Commit d56c3b9

Merge remote-tracking branch 'mmtk/master' into feature/check-fragmentation-immixspace

2 parents: 4cfac97 + dd84218

12 files changed: +153 −102 lines

src/policy/sft_map.rs

Lines changed: 34 additions & 21 deletions
@@ -67,17 +67,23 @@ pub trait SFTMap {
 
 pub(crate) fn create_sft_map() -> Box<dyn SFTMap> {
     cfg_if::cfg_if! {
-        if #[cfg(all(feature = "malloc_mark_sweep", target_pointer_width = "64"))] {
-            // 64-bit malloc mark sweep needs a chunk-based SFT map, but the sparse map is not suitable for 64 bits.
-            Box::new(dense_chunk_map::SFTDenseChunkMap::new())
-        } else if #[cfg(target_pointer_width = "64")] {
+        if #[cfg(target_pointer_width = "64")] {
+            // On 64 bits, we generally want to use the space map, which requires contiguous spaces and no off-heap memory.
+            // If those requirements are not met, we have to choose a different SFT map implementation.
             use crate::util::heap::layout::vm_layout::vm_layout;
-            if vm_layout().force_use_contiguous_spaces {
-                Box::new(space_map::SFTSpaceMap::new())
-            } else {
+            if !vm_layout().force_use_contiguous_spaces {
+                // This is usually the case for compressed pointers. Use the 32-bit implementation.
                 Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
+            } else if cfg!(any(feature = "malloc_mark_sweep", feature = "vm_space")) {
+                // We have off-heap memory (malloc'd objects, or the VM space). We have to use a chunk-based map.
+                Box::new(dense_chunk_map::SFTDenseChunkMap::new())
+            } else {
+                // We can use the space map.
+                Box::new(space_map::SFTSpaceMap::new())
             }
         } else if #[cfg(target_pointer_width = "32")] {
+            // Use the sparse chunk map. As we have a limited virtual address range on 32 bits,
+            // it is okay to have a sparse chunk map which maps every chunk to an index in the array.
             Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
         } else {
             compile_err!("Cannot figure out which SFT map to use.");
@@ -154,15 +160,17 @@ mod space_map {
     /// Space map is a small table, and it has one entry for each MMTk space.
     pub struct SFTSpaceMap {
         sft: Vec<SFTRefStorage>,
+        space_address_start: Address,
+        space_address_end: Address,
     }
 
     unsafe impl Sync for SFTSpaceMap {}
 
     impl SFTMap for SFTSpaceMap {
-        fn has_sft_entry(&self, _addr: Address) -> bool {
-            // Address::ZERO is mapped to index 0, and Address::MAX is mapped to index 31 (TABLE_SIZE-1).
-            // So any address has an SFT entry.
-            true
+        fn has_sft_entry(&self, addr: Address) -> bool {
+            // An arbitrary address from Address::ZERO to Address::MAX is cyclically mapped to an index between 0 and 31.
+            // Only addresses within the virtual address range we actually use have valid entries.
+            addr >= self.space_address_start && addr < self.space_address_end
         }
 
         fn get_side_metadata(&self) -> Option<&SideMetadataSpec> {
@@ -186,21 +194,23 @@ mod space_map {
             start: Address,
             bytes: usize,
         ) {
-            let table_size = Self::addr_to_index(Address::MAX) + 1;
             let index = Self::addr_to_index(start);
             if cfg!(debug_assertions) {
                 // Make sure we only update from empty to a valid space, or overwrite the space
                 let old = self.sft[index].load();
                 assert!((*old).name() == EMPTY_SFT_NAME || (*old).name() == (*space).name());
                 // Make sure the range is in the space
                 let space_start = Self::index_to_space_start(index);
-                // FIXME: Currently skip the check for the last space. The following works fine for MMTk internal spaces,
-                // but the VM space is an exception. Any address after the last space is considered as the last space,
-                // based on our indexing function. In that case, we cannot assume the end of the region is within the last space (with MAX_SPACE_EXTENT).
-                if index != table_size - 1 {
-                    assert!(start >= space_start);
-                    assert!(start + bytes <= space_start + vm_layout().max_space_extent());
-                }
+                assert!(start >= space_start);
+                assert!(
+                    start + bytes <= space_start + vm_layout().max_space_extent(),
+                    "The range of {} + {} bytes does not fall into the space range [{}, {}), \
+                     and it is probably outside the address range we use.",
+                    start,
+                    bytes,
+                    space_start,
+                    space_start + vm_layout().max_space_extent()
+                );
             }
 
             self.sft.get_unchecked(index).store(space);
@@ -216,12 +226,15 @@ mod space_map {
         /// Create a new space map.
         #[allow(clippy::assertions_on_constants)] // We assert to make sure the constants
         pub fn new() -> Self {
+            use crate::util::heap::layout::heap_parameters::MAX_SPACES;
            let table_size = Self::addr_to_index(Address::MAX) + 1;
-            debug_assert!(table_size >= crate::util::heap::layout::heap_parameters::MAX_SPACES);
+            debug_assert!(table_size >= MAX_SPACES);
            Self {
                sft: std::iter::repeat_with(SFTRefStorage::default)
                    .take(table_size)
                    .collect(),
+                space_address_start: Self::index_to_space_range(1).0, // the start of the first space
+                space_address_end: Self::index_to_space_range(MAX_SPACES - 1).1, // the end of the last space
            }
        }
 
@@ -261,7 +274,7 @@ mod space_map {
 
        let assert_for_index = |i: usize| {
            let (start, end) = SFTSpaceMap::index_to_space_range(i);
-            debug!("Space: Index#{} = [{}, {})", i, start, end);
+            println!("Space: Index#{} = [{}, {})", i, start, end);
            assert_eq!(SFTSpaceMap::addr_to_index(start), i);
            assert_eq!(SFTSpaceMap::addr_to_index(end - 1), i);
        };
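Note (editor's sketch): the has_sft_entry change above only makes sense given how the space map folds the entire address space onto its small table. Below is a self-contained sketch of that cyclic mapping; the constants are assumptions for illustration only (the real logic and values live in SFTSpaceMap::addr_to_index and vm_layout()).

// Sketch only: constants are assumptions, not MMTk's actual values.
const LOG_MAX_SPACE_EXTENT: usize = 41; // assumed per-space extent of 2^41 bytes
const TABLE_SIZE: usize = 32; // assumed number of SFT table entries

fn addr_to_index(addr: usize) -> usize {
    // Dropping the low bits maps each space-extent-sized region to one entry;
    // the modulo makes the mapping cyclic, so far-away addresses alias with
    // indices that belong to real spaces.
    (addr >> LOG_MAX_SPACE_EXTENT) % TABLE_SIZE
}

fn main() {
    let space_start = 1usize << LOG_MAX_SPACE_EXTENT; // maps to index 1
    let alias = space_start + (TABLE_SIZE << LOG_MAX_SPACE_EXTENT); // wraps back to index 1
    assert_eq!(addr_to_index(space_start), addr_to_index(alias));
    println!("both map to index {}", addr_to_index(space_start));
}

Because distant addresses alias with real space indices, the table alone cannot distinguish "in a space" from "far outside the heap"; bounding the check with space_address_start/space_address_end restores that distinction.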

src/policy/vmspace.rs

Lines changed: 0 additions & 14 deletions
@@ -127,20 +127,6 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
        // The default implementation checks with the VM map. But the VM map has some assumptions about
        // the address range for spaces, and the VM space may break those assumptions (as the space is
        // mmapped by the runtime rather than by us). So we use the SFT here.
-
-        // However, the SFT map may not be an ideal solution either for 64 bits. The default
-        // implementation of the SFT map on 64 bits is `SFTSpaceMap`, which maps the entire address
-        // space into an index between 0 and 31, and assumes any address with the same index
-        // is in the same space (with the same SFT). MMTk spaces use indices 1-16. We guarantee that
-        // the VM space does not overlap with the address range that MMTk spaces may use. So
-        // any region used as VM space will have an index of 0, or 17-31, and all the addresses
-        // that are mapped to the same index will be considered as in the VM space. That means,
-        // after we map a region as VM space, the nearby addresses will also be considered
-        // as in the VM space if we use the default `SFTSpaceMap`. We can guarantee the nearby
-        // addresses are not MMTk spaces, but we cannot tell whether they are really in the VM
-        // space or not.
-        // A solution to this is to use `SFTDenseChunkMap` if `vm_space` is enabled on 64 bits.
-        // `SFTDenseChunkMap` has an overhead of a few percent (~3%) compared to `SFTSpaceMap`.
        SFT_MAP.get_checked(start).name() == self.name()
    }
}

src/util/heap/layout/byte_map_mmapper.rs

Lines changed: 33 additions & 24 deletions
@@ -229,16 +229,16 @@ mod tests {
    #[test]
    fn ensure_mapped_1page() {
        serial_test(|| {
+            let pages = 1;
+            let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+            let end_chunk =
+                ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
+            let test_memory_bytes = (end_chunk - start_chunk) * MMAP_CHUNK_BYTES;
            with_cleanup(
                || {
                    let mmapper = ByteMapMmapper::new();
-                    let pages = 1;
                    mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap();
 
-                    let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-                    let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
-                        FIXED_ADDRESS + pages_to_bytes(pages),
-                    );
                    for chunk in start_chunk..end_chunk {
                        assert_eq!(
                            mmapper.mapped[chunk].load(Ordering::Relaxed),
@@ -247,7 +247,7 @@ mod tests {
                    }
                },
                || {
-                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                    memory::munmap(FIXED_ADDRESS, test_memory_bytes).unwrap();
                },
            )
        })
@@ -256,16 +256,16 @@ mod tests {
    #[test]
    fn ensure_mapped_1chunk() {
        serial_test(|| {
+            let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
+            let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+            let end_chunk =
+                ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
+            let test_memory_bytes = (end_chunk - start_chunk) * MMAP_CHUNK_BYTES;
            with_cleanup(
                || {
                    let mmapper = ByteMapMmapper::new();
-                    let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
                    mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap();
 
-                    let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-                    let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
-                        FIXED_ADDRESS + pages_to_bytes(pages),
-                    );
                    for chunk in start_chunk..end_chunk {
                        assert_eq!(
                            mmapper.mapped[chunk].load(Ordering::Relaxed),
@@ -274,7 +274,7 @@ mod tests {
                    }
                },
                || {
-                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                    memory::munmap(FIXED_ADDRESS, test_memory_bytes).unwrap();
                },
            )
        })
@@ -283,11 +283,14 @@ mod tests {
    #[test]
    fn ensure_mapped_more_than_1chunk() {
        serial_test(|| {
+            let pages = (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
+            let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+            let end_chunk =
+                ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
+            let test_memory_bytes = (end_chunk - start_chunk) * MMAP_CHUNK_BYTES;
            with_cleanup(
                || {
                    let mmapper = ByteMapMmapper::new();
-                    let pages =
-                        (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
                    mmapper.ensure_mapped(FIXED_ADDRESS, pages).unwrap();
 
                    let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
@@ -303,7 +306,7 @@ mod tests {
                    }
                },
                || {
-                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                    memory::munmap(FIXED_ADDRESS, test_memory_bytes).unwrap();
                },
            )
        })
@@ -312,17 +315,20 @@ mod tests {
    #[test]
    fn protect() {
        serial_test(|| {
+            let test_memory_bytes = MMAP_CHUNK_BYTES * 2;
+            let test_memory_pages = test_memory_bytes >> LOG_BYTES_IN_PAGE;
+            let protect_memory_bytes = MMAP_CHUNK_BYTES;
+            let protect_memory_pages = protect_memory_bytes >> LOG_BYTES_IN_PAGE;
            with_cleanup(
                || {
                    // map 2 chunks
                    let mmapper = ByteMapMmapper::new();
-                    let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
                    mmapper
-                        .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2)
+                        .ensure_mapped(FIXED_ADDRESS, test_memory_pages)
                        .unwrap();
 
                    // protect 1 chunk
-                    mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
+                    mmapper.protect(FIXED_ADDRESS, protect_memory_pages);
 
                    let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
                    assert_eq!(
@@ -335,7 +341,7 @@ mod tests {
                    );
                },
                || {
-                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                    memory::munmap(FIXED_ADDRESS, test_memory_bytes).unwrap();
                },
            )
        })
@@ -344,17 +350,20 @@ mod tests {
    #[test]
    fn ensure_mapped_on_protected_chunks() {
        serial_test(|| {
+            let test_memory_bytes = MMAP_CHUNK_BYTES * 2;
+            let test_memory_pages = test_memory_bytes >> LOG_BYTES_IN_PAGE;
+            let protect_memory_pages_1 = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE; // protect one chunk in the first protect
+            let protect_memory_pages_2 = test_memory_pages; // protect both chunks in the second protect
            with_cleanup(
                || {
                    // map 2 chunks
                    let mmapper = ByteMapMmapper::new();
-                    let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
                    mmapper
-                        .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2)
+                        .ensure_mapped(FIXED_ADDRESS, test_memory_pages)
                        .unwrap();
 
                    // protect 1 chunk
-                    mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
+                    mmapper.protect(FIXED_ADDRESS, protect_memory_pages_1);
 
                    let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
                    assert_eq!(
@@ -368,7 +377,7 @@ mod tests {
 
                    // ensure mapped - this will unprotect the previously protected chunk
                    mmapper
-                        .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2)
+                        .ensure_mapped(FIXED_ADDRESS, protect_memory_pages_2)
                        .unwrap();
                    assert_eq!(
                        mmapper.mapped[chunk].load(Ordering::Relaxed),
@@ -380,7 +389,7 @@ mod tests {
                    );
                },
                || {
-                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                    memory::munmap(FIXED_ADDRESS, test_memory_bytes).unwrap();
                },
            )
        })
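Note (editor's sketch): each test now unmaps exactly the chunk-aligned region it touched, rather than a blanket MAX_SIZE. Below is a self-contained sketch of the rounding arithmetic; the constants are assumptions for illustration (the real values come from LOG_BYTES_IN_PAGE and MMAP_CHUNK_BYTES).

// Sketch only: constants are assumptions, not the crate's actual values.
const LOG_BYTES_IN_PAGE: usize = 12; // assumed 4 KiB pages
const MMAP_CHUNK_BYTES: usize = 4 << 20; // assumed 4 MiB mmap chunks

fn pages_to_bytes(pages: usize) -> usize {
    pages << LOG_BYTES_IN_PAGE
}

// Round an address down / up to chunk granularity, as the tests do.
fn chunks_down(addr: usize) -> usize {
    addr / MMAP_CHUNK_BYTES
}

fn chunks_up(addr: usize) -> usize {
    (addr + MMAP_CHUNK_BYTES - 1) / MMAP_CHUNK_BYTES
}

fn main() {
    let fixed_address = 0x7000_0000; // chunk-aligned test address, as in this commit
    let pages = 1;
    let start_chunk = chunks_down(fixed_address);
    let end_chunk = chunks_up(fixed_address + pages_to_bytes(pages));
    // Even a single mapped page occupies (and must unmap) a whole chunk.
    let test_memory_bytes = (end_chunk - start_chunk) * MMAP_CHUNK_BYTES;
    assert_eq!(test_memory_bytes, MMAP_CHUNK_BYTES);
    println!("cleanup unmaps {} bytes", test_memory_bytes);
}

Unmapping only the computed region keeps each test's cleanup from disturbing unrelated mappings near the fixed test address.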

src/util/test_util/fixtures.rs

Lines changed: 14 additions & 6 deletions
@@ -115,7 +115,7 @@ impl<T: FixtureContent> Default for SerialFixture<T> {
}
 
pub struct MMTKFixture {
-    pub mmtk: &'static MMTK<MockVM>,
+    mmtk: *mut MMTK<MockVM>,
}
 
impl FixtureContent for MMTKFixture {
@@ -143,13 +143,21 @@ impl MMTKFixture {
 
        let mmtk = memory_manager::mmtk_init(&builder);
        let mmtk_ptr = Box::into_raw(mmtk);
-        let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };
 
        if initialize_collection {
+            let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };
            memory_manager::initialize_collection(mmtk_static, VMThread::UNINITIALIZED);
        }
 
-        MMTKFixture { mmtk: mmtk_static }
+        MMTKFixture { mmtk: mmtk_ptr }
+    }
+
+    pub fn get_mmtk(&self) -> &'static MMTK<MockVM> {
+        unsafe { &*self.mmtk }
+    }
+
+    pub fn get_mmtk_mut(&mut self) -> &'static mut MMTK<MockVM> {
+        unsafe { &mut *self.mmtk }
    }
}
 
@@ -186,7 +194,7 @@ impl MutatorFixture {
            true,
        );
        let mutator =
-            memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
+            memory_manager::bind_mutator(mmtk.get_mmtk(), VMMutatorThread(VMThread::UNINITIALIZED));
        Self { mmtk, mutator }
    }
 
@@ -196,12 +204,12 @@ impl MutatorFixture {
    {
        let mmtk = MMTKFixture::create_with_builder(with_builder, true);
        let mutator =
-            memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
+            memory_manager::bind_mutator(mmtk.get_mmtk(), VMMutatorThread(VMThread::UNINITIALIZED));
        Self { mmtk, mutator }
    }
 
    pub fn mmtk(&self) -> &'static MMTK<MockVM> {
-        self.mmtk.mmtk
+        self.mmtk.get_mmtk()
    }
}

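Note (editor's sketch): with mmtk now held as a raw pointer, the former public field read fixture.mmtk becomes a call to an accessor. A hypothetical call site inside mmtk-core's own test code (context assumed; MMTKFixture::create and get_options both appear elsewhere in this diff):

// Hypothetical usage, not a complete test; assumes the mmtk-core test crate
// context where MMTKFixture, MMTK, and MockVM are already in scope.
fn example_usage(fixture: &mut MMTKFixture) {
    // Shared access replaces the old `fixture.mmtk` field read.
    let mmtk: &'static MMTK<MockVM> = fixture.get_mmtk();
    let _plan = &*mmtk.get_options().plan; // e.g. inspect the selected plan
    // Exclusive access, for tests that need to mutate MMTK state.
    let _mmtk_mut: &'static mut MMTK<MockVM> = fixture.get_mmtk_mut();
}

Storing a raw pointer and re-borrowing in the getters keeps the fixture's 'static ergonomics while making mutable access an explicit, separate path instead of a permanently shared &'static field.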
src/util/test_util/mod.rs

Lines changed: 2 additions & 2 deletions
@@ -46,10 +46,10 @@ mod test {
    }
}
 
-// Test with an address that works for 32 bits.
+// Test with an address that works for 32 bits. The address is chosen empirically.
#[cfg(target_os = "linux")]
const TEST_ADDRESS: Address =
-    crate::util::conversions::chunk_align_down(unsafe { Address::from_usize(0x6000_0000) });
+    crate::util::conversions::chunk_align_down(unsafe { Address::from_usize(0x7000_0000) });
#[cfg(target_os = "macos")]
const TEST_ADDRESS: Address =
    crate::util::conversions::chunk_align_down(unsafe { Address::from_usize(0x2_0000_0000) });

src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ pub fn allocate_without_initialize_collection() {
 
    // Build mutator
    let mut mutator = memory_manager::bind_mutator(
-        fixture.mmtk,
+        fixture.get_mmtk(),
        VMMutatorThread(VMThread::UNINITIALIZED),
    );
 
src/vm/tests/mock_tests/mock_test_allocator_info.rs

Lines changed: 5 additions & 3 deletions
@@ -14,12 +14,14 @@ pub fn test_allocator_info() {
        || {
            let fixture = MMTKFixture::create();
 
-            let selector =
-                memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default);
+            let selector = memory_manager::get_allocator_mapping(
+                fixture.get_mmtk(),
+                AllocationSemantics::Default,
+            );
            let base_offset = crate::plan::Mutator::<MockVM>::get_allocator_base_offset(selector);
            let allocator_info = AllocatorInfo::new::<MockVM>(selector);
 
-            match *fixture.mmtk.get_options().plan {
+            match *fixture.get_mmtk().get_options().plan {
                PlanSelector::NoGC
                | PlanSelector::Immix
                | PlanSelector::SemiSpace
