 use crate::error::Error;
 use crate::module::Module;
 use crate::region::RegionInternal;
-use libc::{c_void, SIGSTKSZ};
+use libc::c_void;
 use lucet_module::GlobalValue;
 use nix::unistd::{sysconf, SysconfVar};
 use std::sync::{Arc, Once, Weak};
@@ -112,26 +112,79 @@ impl Drop for Alloc {
     }
 }
 
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum AddrLocation {
+    Heap,
+    InaccessibleHeap,
+    StackGuard,
+    Stack,
+    Globals,
+    SigStackGuard,
+    SigStack,
+    Unknown,
+}
+
+impl AddrLocation {
+    /// If a fault occurs in this location, is it fatal to the entire process?
+    ///
+    /// This is currently a permissive baseline that only returns true for unknown locations and the
+    /// signal stack guard, in case a `Region` implementation uses faults to populate the accessible
+    /// locations like the heap and the globals.
+    pub fn is_fault_fatal(self) -> bool {
+        use AddrLocation::*;
+        match self {
+            SigStackGuard | Unknown => true,
+            _ => false,
+        }
+    }
+}
+
 impl Alloc {
-    pub fn addr_in_guard_page(&self, addr: *const c_void) -> bool {
+    /// Where in an `Alloc` does a particular address fall?
+    pub fn addr_location(&self, addr: *const c_void) -> AddrLocation {
         let addr = addr as usize;
-        let heap = self.slot().heap as usize;
-        let guard_start = heap + self.heap_accessible_size;
-        let guard_end = heap + self.slot().limits.heap_address_space_size;
-        // eprintln!(
-        //     "addr = {:p}, guard_start = {:p}, guard_end = {:p}",
-        //     addr, guard_start as *mut c_void, guard_end as *mut c_void
-        // );
-        let stack_guard_end = self.slot().stack as usize;
-        let stack_guard_start = stack_guard_end - host_page_size();
-        // eprintln!(
-        //     "addr = {:p}, stack_guard_start = {:p}, stack_guard_end = {:p}",
-        //     addr, stack_guard_start as *mut c_void, stack_guard_end as *mut c_void
-        // );
-        let in_heap_guard = (addr >= guard_start) && (addr < guard_end);
-        let in_stack_guard = (addr >= stack_guard_start) && (addr < stack_guard_end);
-
-        in_heap_guard || in_stack_guard
+
+        let heap_start = self.slot().heap as usize;
+        let heap_inaccessible_start = heap_start + self.heap_accessible_size;
+        let heap_inaccessible_end = heap_start + self.slot().limits.heap_address_space_size;
+
+        if (addr >= heap_start) && (addr < heap_inaccessible_start) {
+            return AddrLocation::Heap;
+        }
+        if (addr >= heap_inaccessible_start) && (addr < heap_inaccessible_end) {
+            return AddrLocation::InaccessibleHeap;
+        }
+
+        let stack_start = self.slot().stack as usize;
+        let stack_end = stack_start + self.slot().limits.stack_size;
+        let stack_guard_start = stack_start - host_page_size();
+
+        if (addr >= stack_guard_start) && (addr < stack_start) {
+            return AddrLocation::StackGuard;
+        }
+        if (addr >= stack_start) && (addr < stack_end) {
+            return AddrLocation::Stack;
+        }
+
+        let globals_start = self.slot().globals as usize;
+        let globals_end = globals_start + self.slot().limits.globals_size;
+
+        if (addr >= globals_start) && (addr < globals_end) {
+            return AddrLocation::Globals;
+        }
+
+        let sigstack_start = self.slot().sigstack as usize;
+        let sigstack_end = sigstack_start + self.slot().limits.signal_stack_size;
+        let sigstack_guard_start = sigstack_start - host_page_size();
+
+        if (addr >= sigstack_guard_start) && (addr < sigstack_start) {
+            return AddrLocation::SigStackGuard;
+        }
+        if (addr >= sigstack_start) && (addr < sigstack_end) {
+            return AddrLocation::SigStack;
+        }
+
+        AddrLocation::Unknown
     }
 
     pub fn expand_heap(&mut self, expand_bytes: u32, module: &dyn Module) -> Result<u32, Error> {
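For orientation, a minimal sketch (not part of this change) of how an embedder's fault handler might consume the new classification; `alloc` and `fault_addr` stand in for values supplied by the embedder's signal-handling path:

    fn describe_fault(alloc: &Alloc, fault_addr: *const libc::c_void) -> &'static str {
        match alloc.addr_location(fault_addr) {
            // Faults in the guard pages or the inaccessible part of the heap can be
            // attributed to the guest instance rather than the host.
            AddrLocation::InaccessibleHeap | AddrLocation::StackGuard => "instance fault",
            // SigStackGuard and Unknown are treated as fatal to the whole process.
            loc if loc.is_fault_fatal() => "fatal fault",
            _ => "fault in an accessible region",
        }
    }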
@@ -318,7 +371,10 @@ impl Alloc {
 
     /// Return the sigstack as a mutable byte slice.
     pub unsafe fn sigstack_mut(&mut self) -> &mut [u8] {
-        std::slice::from_raw_parts_mut(self.slot().sigstack as *mut u8, libc::SIGSTKSZ)
+        std::slice::from_raw_parts_mut(
+            self.slot().sigstack as *mut u8,
+            self.slot().limits.signal_stack_size,
+        )
     }
 
     pub fn mem_in_heap<T>(&self, ptr: *const T, len: usize) -> bool {
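A small sketch of the behavioral change above, assuming a mutable `alloc` backed by a slot configured with `limits`: the sigstack slice length now tracks the configured limit instead of the platform `SIGSTKSZ` constant.

    let sigstack = unsafe { alloc.sigstack_mut() };
    assert_eq!(sigstack.len(), limits.signal_stack_size);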
@@ -351,15 +407,30 @@ pub struct Limits {
     pub stack_size: usize,
     /// Size of the globals region in bytes; each global uses 8 bytes. (default 4K)
     pub globals_size: usize,
+    /// Size of the signal stack in bytes. (default SIGSTKSZ for release builds, 12K for debug builds)
+    ///
+    /// This difference is to account for the greatly increased stack size usage in the signal
+    /// handler when running without optimizations.
+    ///
+    /// Note that debug vs. release mode is determined by `cfg(debug_assertions)`, so if you are
+    /// specifically enabling debug assertions in your release builds, the default signal stack will
+    /// be larger.
+    pub signal_stack_size: usize,
 }
 
-impl Default for Limits {
-    fn default() -> Limits {
+#[cfg(debug_assertions)]
+pub const DEFAULT_SIGNAL_STACK_SIZE: usize = 12 * 1024;
+#[cfg(not(debug_assertions))]
+pub const DEFAULT_SIGNAL_STACK_SIZE: usize = libc::SIGSTKSZ;
+
+impl Limits {
+    pub const fn default() -> Limits {
         Limits {
             heap_memory_size: 16 * 64 * 1024,
             heap_address_space_size: 0x200000000,
             stack_size: 128 * 1024,
             globals_size: 4096,
+            signal_stack_size: DEFAULT_SIGNAL_STACK_SIZE,
         }
     }
 }
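A usage sketch for the new field: an embedder keeping the other defaults while enlarging the signal stack. The chosen value is illustrative and must remain a multiple of the host page size (see the validation hunk below).

    let limits = Limits {
        signal_stack_size: 64 * 1024,
        ..Limits::default()
    };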
@@ -370,19 +441,18 @@ impl Limits {
         // * the instance (up to instance_heap_offset)
         // * the heap, followed by guard pages
         // * the stack (grows towards heap guard pages)
-        // * one guard page (for good luck?)
         // * globals
         // * one guard page (to catch signal stack overflow)
-        // * the signal stack (size given by signal.h SIGSTKSZ macro)
+        // * the signal stack
 
         [
             instance_heap_offset(),
             self.heap_address_space_size,
-            self.stack_size,
             host_page_size(),
+            self.stack_size,
             self.globals_size,
             host_page_size(),
-            SIGSTKSZ,
+            self.signal_stack_size,
         ]
         .iter()
         .try_fold(0usize, |acc, &x| acc.checked_add(x))
@@ -419,6 +489,11 @@ impl Limits {
         if self.stack_size <= 0 {
             return Err(Error::InvalidArgument("stack size must be greater than 0"));
         }
+        if self.signal_stack_size % host_page_size() != 0 {
+            return Err(Error::InvalidArgument(
+                "signal stack size must be a multiple of host page size",
+            ));
+        }
         Ok(())
     }
 }
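The new check rejects sizes that are not page-aligned. A hypothetical helper (names are illustrative, not part of this commit) that an embedder could use to round a requested size up before building its `Limits`:

    // Round `size` up to the next multiple of `page`; assumes `page` is non-zero.
    fn round_up_to_page(size: usize, page: usize) -> usize {
        (size + page - 1) / page * page
    }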