Improved performance for PageTable::cold_pages.

This commit is contained in:
Filipe Rodrigues 2023-07-12 08:14:46 +01:00
parent ce38df9215
commit 19440f6b92
3 changed files with 71 additions and 39 deletions

View File

@ -83,24 +83,31 @@ impl HeMem {
/// Cools a memory by (at most) `count` pages.
///
/// Returns the number of pages cooled.
/// Returns whether any pages were cooled
///
/// # Panics
/// Panics if `mem_idx` is an invalid memory index
pub fn cool_memory(&mut self, cur_time: u64, mem_idx: MemIdx, count: usize) -> usize {
let mut cooled_pages = 0;
for page_ptr in self.page_table.cold_pages(
self.config.read_hot_threshold,
self.config.write_hot_threshold,
mem_idx,
count,
) {
if self.cool_page(cur_time, page_ptr).is_ok() {
cooled_pages += 1;
}
// TODO: Cool more than just 1 page at a time?
/// Cools a memory by (at most) one page.
///
/// Returns whether a page was cooled.
pub fn cool_memory(&mut self, cur_time: u64, mem_idx: MemIdx) -> bool {
    // If there isn't a slower memory than `mem_idx`, there's nowhere to
    // evict a cold page to, so we can't cool it.
    if self.memories.slower_memory(mem_idx).is_none() {
        return false;
    }

    // Get a cold page to cool, else we can't cool
    let Some(page_ptr) = self
        .page_table
        .cold_pages(self.config.read_hot_threshold, self.config.write_hot_threshold, mem_idx)
        .next()
    else {
        return false;
    };

    // Then try to cool the page, reporting whether it succeeded.
    // (Idiom: `.is_ok()` replaces the `match { Ok(()) => true, Err(_) => false }` form.)
    self.cool_page(cur_time, page_ptr).is_ok()
}
/// Migrates a page, possibly cooling the destination if full.
@ -118,7 +125,7 @@ impl HeMem {
match self.memories.migrate_page(src_mem_idx, dst_mem_idx) {
// If we managed to, move the page's memory
Ok(()) => {
page.move_mem(dst_mem_idx);
self.page_table.move_mem(page_ptr, dst_mem_idx);
self.statistics
.register_page_migration(page_ptr, statistics::PageMigration {
@ -137,18 +144,14 @@ impl HeMem {
"Unable to migrate page, cooling destination"
);
// TODO: Cool for more than just 1 page at a time?
let pages_cooled = self.cool_memory(cur_time, dst_mem_idx, 1);
match pages_cooled > 0 {
let pages_cooled = self.cool_memory(cur_time, dst_mem_idx);
match pages_cooled {
// If we cooled at least 1 page, migrate it
true => {
self.memories
.migrate_page(src_mem_idx, dst_mem_idx)
.expect("Just freed some pages when cooling");
self.page_table
.get_mut(page_ptr)
.expect("Page wasn't in page table")
.move_mem(dst_mem_idx);
self.page_table.move_mem(page_ptr, dst_mem_idx);
self.statistics
.register_page_migration(page_ptr, statistics::PageMigration {
time: cur_time,
@ -265,7 +268,7 @@ impl sim::Classifier for HeMem {
}
}
// If the page was cold and is now hot, head it
// If the page was cold and is now hot, heat it
if page_is_hot && !page_was_hot {
tracing::trace!(?page_ptr, "Page is now hot, warming it");
if let Err(err) = self.warm_page(trace.record.time, page_ptr) {

View File

@ -30,6 +30,14 @@ impl Memories {
.map(|(idx, mem)| (MemIdx(idx), mem))
}
/// Returns a memory by it's memory index
///
/// # Panics
/// Panics if `idx` is an invalid memory index.
pub fn get_mut(&mut self, idx: MemIdx) -> &mut Memory {
self.memories.get_mut(idx.0).expect("Memory index was invalid")
}
/// Migrates a page from `src` to `dst`
///
/// Returns `Err` if the source memory is empty or the destination memory is full.
@ -75,7 +83,7 @@ impl Memories {
}
/// Memory index
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
pub struct MemIdx(usize);
impl MemIdx {

View File

@ -3,8 +3,7 @@
// Imports
use {
super::memories::MemIdx,
itertools::Itertools,
std::collections::{btree_map, BTreeMap},
std::collections::{btree_map, BTreeMap, BTreeSet},
};
/// Page table
@ -14,6 +13,11 @@ pub struct PageTable {
// TODO: `HashMap` with custom hash? We don't use the order
pages: BTreeMap<PagePtr, Page>,
/// Pages, by memory.
///
/// An index to be able to quickly find pages by their memory
pages_by_mem: BTreeMap<MemIdx, BTreeSet<PagePtr>>,
/// Current cooling clock tick
cooling_clock_tick: usize,
}
@ -23,6 +27,7 @@ impl PageTable {
/// Creates an empty page table, with the cooling clock at tick 0.
pub fn new() -> Self {
    Self {
        cooling_clock_tick: 0,
        pages: BTreeMap::new(),
        pages_by_mem: BTreeMap::new(),
    }
}
@ -42,6 +47,21 @@ impl PageTable {
Some(page)
}
/// Moves a page to `mem_idx`, keeping the `pages_by_mem` index in sync.
///
/// # Panics
/// Panics if `page_ptr` is an invalid page pointer
pub fn move_mem(&mut self, page_ptr: PagePtr, mem_idx: MemIdx) {
    let page = self.pages.get_mut(&page_ptr).expect("Invalid page pointer");
    // Bring the page's access counters up to date with the global cooling clock
    page.cool_accesses(self.cooling_clock_tick);
    if mem_idx != page.mem_idx {
        // Update the by-memory index: remove the page from its *old* memory's
        // set and insert it into the *new* one. (The original had these two
        // swapped — removing from the destination set and inserting into the
        // source set — which corrupted `pages_by_mem`; `insert` shows the
        // correct convention of keying a page by its current `mem_idx`.)
        self.pages_by_mem.entry(page.mem_idx).or_default().remove(&page_ptr);
        self.pages_by_mem.entry(mem_idx).or_default().insert(page_ptr);
        page.mem_idx = mem_idx;
    }
}
/// Inserts a new page into this page table.
///
/// # Errors
@ -51,8 +71,10 @@ impl PageTable {
btree_map::Entry::Vacant(entry) => {
// Note: We cool it before inserting to ensure that the page is up to date.
page.cool_accesses(self.cooling_clock_tick);
self.pages_by_mem.entry(page.mem_idx).or_default().insert(page.ptr);
entry.insert(page);
Ok(())
},
btree_map::Entry::Occupied(_) => anyhow::bail!("Page already existed: {page:?}"),
@ -76,15 +98,19 @@ impl PageTable {
read_hot_threshold: usize,
write_hot_threshold: usize,
mem_idx: MemIdx,
count: usize,
) -> Vec<PagePtr> {
self.pages
.iter_mut()
.update(|(_, page)| page.cool_accesses(self.cooling_clock_tick))
.filter(|(_, page)| page.mem_idx == mem_idx && !page.is_hot(read_hot_threshold, write_hot_threshold))
.map(|(&page_ptr, _)| page_ptr)
.take(count)
.collect()
) -> impl Iterator<Item = PagePtr> + '_ {
let pages = &self.pages;
self.pages_by_mem
.entry(mem_idx)
.or_default()
.iter()
.copied()
.filter(move |page_ptr| {
!pages
.get(page_ptr)
.expect("Invalid page pointer")
.is_hot(read_hot_threshold, write_hot_threshold)
})
}
}
@ -128,11 +154,6 @@ impl Page {
self.mem_idx
}
/// Moves this page to `mem_idx`
///
/// NOTE(review): only updates this page's own `mem_idx` field — any
/// external per-memory index (e.g. the page table's `pages_by_mem`)
/// must be kept in sync by the caller.
pub fn move_mem(&mut self, mem_idx: MemIdx) {
self.mem_idx = mem_idx;
}
/// Registers a read access
pub fn register_read_access(&mut self) {
self.adjusted_read_accesses += 1;