Skip to content

Commit 28592f2

Browse files
committed
Swap in Allocator-based implementation of hash::RawTable.
1 parent 511e6b9 commit 28592f2

File tree

2 files changed

+72
-113
lines changed

2 files changed

+72
-113
lines changed

src/lib.rs

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
#![feature(unsafe_no_drop_flag)]
22
#![feature(dropck_parametricity)]
33
#![feature(filling_drop)]
4-
#![feature(heap_api, wrapping, unique)]
4+
#![feature(heap_api, unique)]
55
#![feature(alloc)]
6+
#![feature(nonzero)]
67

78
// features for mod collections
89
#![feature(box_syntax)]

src/std/collections/hash/table.rs

+70-112
Original file line numberDiff line numberDiff line change
@@ -8,20 +8,19 @@
88
// option. This file may not be copied, modified, or distributed
99
// except according to those terms.
1010

11-
use alloc::heap::{allocate, deallocate, EMPTY};
12-
use core_alloc::Allocator;
11+
use alloc::heap::{EMPTY};
12+
use core_alloc::{Allocator, Kind};
1313
use core_alloc::heap::Allocator as HeapAllocator;
1414

15-
use cmp;
1615
use hash::{Hash, Hasher};
1716
use marker;
18-
use mem::{align_of, size_of};
1917
use mem;
20-
use num::wrapping::OverflowingOps;
2118
use ops::{Deref, DerefMut};
2219
use ptr::{self, Unique};
2320
use collections::hash_state::HashState;
2421

22+
use core::nonzero;
23+
2524
use self::BucketState::*;
2625

2726
const EMPTY_BUCKET: u64 = 0;
@@ -170,7 +169,7 @@ pub fn make_hash<T: ?Sized, S>(hash_state: &S, t: &T) -> SafeHash
170169
// modified to no longer assume this.
171170
#[test]
172171
fn can_alias_safehash_as_u64() {
173-
assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
172+
assert_eq!(mem::size_of::<SafeHash>(), mem::size_of::<u64>())
174173
}
175174

176175
impl<K, V> RawBucket<K, V> {
@@ -504,69 +503,49 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> GapThenFull<K, V, M> {
504503
}
505504
}
506505

507-
508-
/// Rounds up to a multiple of a power of two. Returns the closest multiple
509-
/// of `target_alignment` that is higher or equal to `unrounded`.
510-
///
511-
/// # Panics
512-
///
513-
/// Panics if `target_alignment` is not a power of two.
514-
#[inline]
515-
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
516-
assert!(target_alignment.is_power_of_two());
517-
(unrounded + target_alignment - 1) & !(target_alignment - 1)
506+
struct AllocationInfo {
507+
kind: Kind,
508+
hash_offset: usize,
509+
keys_offset: usize,
510+
vals_offset: usize,
518511
}
519512

520-
#[test]
521-
fn test_rounding() {
522-
assert_eq!(round_up_to_next(0, 4), 0);
523-
assert_eq!(round_up_to_next(1, 4), 4);
524-
assert_eq!(round_up_to_next(2, 4), 4);
525-
assert_eq!(round_up_to_next(3, 4), 4);
526-
assert_eq!(round_up_to_next(4, 4), 4);
527-
assert_eq!(round_up_to_next(5, 4), 8);
528-
}
529-
530-
// Returns a tuple of (key_offset, val_offset),
531-
// from the start of a mallocated array.
513+
// Returns the kind of the combined hashes-keys-values array,
514+
// along with the offsets of the hashes, keys, and values portions.
515+
//
516+
// `capacity` must be non-zero.
532517
#[inline]
533-
fn calculate_offsets(hashes_size: usize,
534-
keys_size: usize, keys_align: usize,
535-
vals_align: usize)
536-
-> (usize, usize, bool) {
537-
let keys_offset = round_up_to_next(hashes_size, keys_align);
538-
let (end_of_keys, oflo) = keys_offset.overflowing_add(keys_size);
539-
540-
let vals_offset = round_up_to_next(end_of_keys, vals_align);
541-
542-
(keys_offset, vals_offset, oflo)
543-
}
544-
545-
// Returns a tuple of (minimum required malloc alignment, hash_offset,
546-
// array_size), from the start of a mallocated array.
547-
fn calculate_allocation(hash_size: usize, hash_align: usize,
548-
keys_size: usize, keys_align: usize,
549-
vals_size: usize, vals_align: usize)
550-
-> (usize, usize, usize, bool) {
551-
let hash_offset = 0;
552-
let (_, vals_offset, oflo) = calculate_offsets(hash_size,
553-
keys_size, keys_align,
554-
vals_align);
555-
let (end_of_vals, oflo2) = vals_offset.overflowing_add(vals_size);
556-
557-
let align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
558-
559-
(align, hash_offset, end_of_vals, oflo || oflo2)
560-
}
561-
562-
#[test]
563-
fn test_offset_calculation() {
564-
assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148, false));
565-
assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6, false));
566-
assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48, false));
567-
assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144, false));
568-
assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5, false));
569-
assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24, false));
518+
fn calculate_allocation<K, V>(capacity: usize) -> Option<AllocationInfo> {
519+
debug_assert!(capacity > 0);
520+
let hashes_kind = Kind::array::<u64>(capacity).unwrap();
521+
let opt_keys_kind = Kind::array::<K>(capacity);
522+
let opt_vals_kind = Kind::array::<V>(capacity);
523+
524+
let (hashes_kind, hashes_offset) = (hashes_kind, 0);
525+
526+
let (hk_kind, keys_offset) = match opt_keys_kind {
527+
Some(keys_kind) => match hashes_kind.extend(keys_kind) {
528+
Some(kind_offset) => kind_offset,
529+
None => return None,
530+
},
531+
None => {
532+
// can zero-sized keys ever actually make sense in a hashtable?
533+
let s = *hashes_kind.size(); (hashes_kind, s)
534+
},
535+
};
536+
537+
let (hkv_kind, vals_offset) = match opt_vals_kind {
538+
Some(vals_kind) => match hk_kind.extend(vals_kind) {
539+
Some(hkv_kind) => hkv_kind,
540+
None => return None,
541+
},
542+
None => { let s = *hk_kind.size(); (hk_kind, s) }
543+
};
544+
545+
return Some(AllocationInfo {
546+
kind: hkv_kind,
547+
hash_offset: hashes_offset, keys_offset: keys_offset, vals_offset: vals_offset
548+
});
570549
}
571550

572551
impl<K, V> RawTable<K, V> {
@@ -580,7 +559,7 @@ impl<K, V> RawTable<K, V> {
580559
impl<K, V, A> RawTable<K, V, A> where A: Allocator {
581560
/// Does not initialize the buckets. The caller should ensure they,
582561
/// at the very least, set every hash to EMPTY_BUCKET.
583-
unsafe fn new_uninitialized_in(capacity: usize, a: A) -> RawTable<K, V, A> {
562+
unsafe fn new_uninitialized_in(capacity: usize, mut a: A) -> RawTable<K, V, A> {
584563
if capacity == 0 {
585564
return RawTable {
586565
size: 0,
@@ -591,37 +570,25 @@ impl<K, V, A> RawTable<K, V, A> where A: Allocator {
591570
};
592571
}
593572

594-
// No need for `checked_mul` before a more restrictive check performed
595-
// later in this method.
596-
let hashes_size = capacity * size_of::<u64>();
597-
let keys_size = capacity * size_of::< K >();
598-
let vals_size = capacity * size_of::< V >();
599-
600573
// Allocating hashmaps is a little tricky. We need to allocate three
601574
// arrays, but since we know their sizes and alignments up front,
602575
// we just allocate a single array, and then have the subarrays
603576
// point into it.
604-
//
605-
// This is great in theory, but in practice getting the alignment
606-
// right is a little subtle. Therefore, calculating offsets has been
607-
// factored out into a different function.
608-
let (malloc_alignment, hash_offset, size, oflo) =
609-
calculate_allocation(
610-
hashes_size, align_of::<u64>(),
611-
keys_size, align_of::< K >(),
612-
vals_size, align_of::< V >());
613-
614-
assert!(!oflo, "capacity overflow");
615-
616-
// One check for overflow that covers calculation and rounding of size.
617-
let size_of_bucket = size_of::<u64>().checked_add(size_of::<K>()).unwrap()
618-
.checked_add(size_of::<V>()).unwrap();
619-
assert!(size >= capacity.checked_mul(size_of_bucket)
620-
.expect("capacity overflow"),
621-
"capacity overflow");
622-
623-
let buffer = allocate(size, malloc_alignment);
624-
if buffer.is_null() { ::alloc::oom() }
577+
let AllocationInfo { kind, hash_offset, .. } = calculate_allocation::<K, V>(capacity)
578+
.unwrap_or_else(|| panic!("capacity overflow"));
579+
580+
// Check for overflow of an individual bucket.
581+
Kind::new::<u64>()
582+
.and_then(|hk| match Kind::new::<K>() {
583+
Some(k) => hk.extend(k).map(|kd|kd.0), None => Some(hk) })
584+
.and_then(|hkk| match Kind::new::<V>() {
585+
Some(v) => hkk.extend(v).map(|kd|kd.0), None => Some(hkk) })
586+
.unwrap_or_else(|| panic!("capacity overflow"));
587+
588+
let buffer = match a.alloc(kind) {
589+
Ok(buffer) => buffer,
590+
Err(_) => a.oom(),
591+
};
625592

626593
let hashes = buffer.offset(hash_offset as isize) as *mut u64;
627594

@@ -635,15 +602,14 @@ impl<K, V, A> RawTable<K, V, A> where A: Allocator {
635602
}
636603

637604
fn first_bucket_raw(&self) -> RawBucket<K, V> {
638-
let hashes_size = self.capacity * size_of::<u64>();
639-
let keys_size = self.capacity * size_of::<K>();
640-
641605
let buffer = *self.hashes as *mut u8;
642-
let (keys_offset, vals_offset, oflo) =
643-
calculate_offsets(hashes_size,
644-
keys_size, align_of::<K>(),
645-
align_of::<V>());
646-
debug_assert!(!oflo, "capacity overflow");
606+
let (keys_offset, vals_offset) = if self.capacity == 0 {
607+
(0, 0)
608+
} else {
609+
let AllocationInfo { keys_offset, vals_offset, .. } =
610+
calculate_allocation::<K, V>(self.capacity).unwrap();
611+
(keys_offset, vals_offset)
612+
};
647613
unsafe {
648614
RawBucket {
649615
hash: *self.hashes,
@@ -1027,18 +993,10 @@ impl<K, V, A> Drop for RawTable<K, V, A> where A: Allocator {
1027993
for _ in self.rev_move_buckets() {}
1028994
}
1029995

1030-
let hashes_size = self.capacity * size_of::<u64>();
1031-
let keys_size = self.capacity * size_of::<K>();
1032-
let vals_size = self.capacity * size_of::<V>();
1033-
let (align, _, size, oflo) =
1034-
calculate_allocation(hashes_size, align_of::<u64>(),
1035-
keys_size, align_of::<K>(),
1036-
vals_size, align_of::<V>());
1037-
1038-
debug_assert!(!oflo, "should be impossible");
996+
let AllocationInfo { kind, .. } = calculate_allocation::<K, V>(self.capacity).unwrap();
1039997

1040998
unsafe {
1041-
deallocate(*self.hashes as *mut u8, size, align);
999+
self.alloc.dealloc(nonzero::NonZero::new(*self.hashes as *mut u8), kind).unwrap();
10421000
// Remember how everything was allocated out of one buffer
10431001
// during initialization? We only need one call to free here.
10441002
}

0 commit comments

Comments
 (0)