 struct timeval     start_date;       /* the process's start date in wall-clock time */
 struct timeval     ready_date;       /* date when the process was considered ready */
 ullong             start_time_ns;    /* the process's start date in internal monotonic time (ns) */
-volatile ullong    global_now_ns;    /* common monotonic date between all threads, in ns (wraps every 585 yr) */
+volatile ullong    _global_now_ns;   /* locally stored common monotonic date between all threads, in ns (wraps every 585 yr) */
+volatile ullong   *global_now_ns;    /* common monotonic date, may point to _global_now_ns or shared memory */
 volatile uint      _global_now_ms;   /* locally stored common monotonic date in milliseconds (may wrap) */
 volatile uint     *global_now_ms;    /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */
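The declarations above introduce an indirection: the nanosecond date itself now lives in the process-local `_global_now_ns`, while all code goes through the pointer `global_now_ns`, exactly as was already done for the millisecond date. Below is a minimal sketch of that pattern, not the patch's actual code: the init and read helpers and the shared-memory hook (`clock_init_now_ptr`, `clock_use_shared_now`, `clock_read_now_ns`) are hypothetical names, and how the pointer would later be retargeted is only an assumption.

    typedef unsigned long long ullong;

    /* process-local storage for the shared monotonic date, in ns */
    static volatile ullong _global_now_ns;

    /* every reader/writer goes through this pointer; it normally targets the
     * process-local variable but may be retargeted to shared memory later */
    static volatile ullong *global_now_ns;

    /* mirrors the init step in the patch: start with the local storage */
    static void clock_init_now_ptr(void)
    {
            global_now_ns = &_global_now_ns;
    }

    /* hypothetical hook showing why the indirection exists: once a shared
     * mapping is available, only this assignment has to change */
    static void clock_use_shared_now(volatile ullong *shared)
    {
            if (shared)
                    global_now_ns = shared;
    }

    /* callers dereference the pointer, which is why HA_ATOMIC_LOAD(&global_now_ns)
     * becomes HA_ATOMIC_LOAD(global_now_ns) throughout the patch */
    static ullong clock_read_now_ns(void)
    {
            return *global_now_ns;
    }

The call sites never need to know whether the date lives inside the process or in a shared segment; they simply drop one level of `&` and keep dereferencing the same pointer.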
@@ -239,7 +240,7 @@ void clock_update_local_date(int max_wait, int interrupted)
 		now_ns += ms_to_ns(max_wait);
 
 	/* consider the most recent known date */
-	now_ns = MAX(now_ns, HA_ATOMIC_LOAD(&global_now_ns));
+	now_ns = MAX(now_ns, HA_ATOMIC_LOAD(global_now_ns));
 
 	/* this event is rare, but it requires proper handling because if
 	 * we just left now_ns where it was, the date will not be updated
@@ -270,7 +271,7 @@ void clock_update_global_date()
 	 * realistic regarding the global date, which only moves forward,
 	 * otherwise catch up.
 	 */
-	old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
+	old_now_ns = _HA_ATOMIC_LOAD(global_now_ns);
 	old_now_ms = _HA_ATOMIC_LOAD(global_now_ms);
 
 	do {
@@ -300,7 +301,7 @@ void clock_update_global_date()
 		/* let's try to update the global_now_ns (both in nanoseconds
 		 * and ms forms) or loop again.
 		 */
-	} while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
+	} while ((!_HA_ATOMIC_CAS(global_now_ns, &old_now_ns, now_ns) ||
 		  (now_ms != old_now_ms && !_HA_ATOMIC_CAS(global_now_ms, &old_now_ms, now_ms))) &&
 		 __ha_cpu_relax());
 
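The loop above is a compare-and-swap retry: the writer only publishes its date if nobody else advanced the global clock in the meantime, otherwise it reloads the current value and tries again. Here is a minimal sketch of the same pattern using C11 atomics in place of the `_HA_ATOMIC_*` macros; `shared_now_ns` and `publish_now_ns` are illustrative names, and the sketch handles only the nanosecond value, not the paired millisecond update.

    #include <stdatomic.h>

    typedef unsigned long long ullong;

    static _Atomic ullong shared_now_ns;   /* stand-in for *global_now_ns */

    /* publish our local date, but never move the shared clock backwards */
    static void publish_now_ns(ullong local_now_ns)
    {
            ullong old = atomic_load_explicit(&shared_now_ns, memory_order_relaxed);

            do {
                    if (local_now_ns <= old)
                            return;    /* someone already published a newer date */
                    /* on failure the CAS reloads 'old' and we evaluate again */
            } while (!atomic_compare_exchange_weak(&shared_now_ns, &old, local_now_ns));
    }

The real loop also retries when the millisecond CAS fails and inserts `__ha_cpu_relax()` between attempts to ease contention; the sketch only shows the core forward-only publication step.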
@@ -323,10 +324,10 @@ void clock_init_process_date(void)
 	th_ctx->prev_mono_time = th_ctx->curr_mono_time = before_poll_mono_ns;
 	gettimeofday(&date, NULL);
 	after_poll = before_poll = date;
-	global_now_ns = th_ctx->curr_mono_time;
-	if (!global_now_ns) // CLOCK_MONOTONIC not supported
-		global_now_ns = tv_to_ns(&date);
-	now_ns = global_now_ns;
+	_global_now_ns = th_ctx->curr_mono_time;
+	if (!_global_now_ns) // CLOCK_MONOTONIC not supported
+		_global_now_ns = tv_to_ns(&date);
+	now_ns = _global_now_ns;
 
 	_global_now_ms = ns_to_ms(now_ns);
 
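The initialization keeps a fallback path: when the monotonic reading is zero (CLOCK_MONOTONIC not supported), the wall-clock date from gettimeofday() seeds the internal time instead. A standalone sketch of that fallback, with `tv_to_ns()` reimplemented locally and `initial_now_ns` as a purely illustrative helper name:

    #include <sys/time.h>
    #include <time.h>

    typedef unsigned long long ullong;

    static ullong tv_to_ns(const struct timeval *tv)
    {
            return (ullong)tv->tv_sec * 1000000000ULL + (ullong)tv->tv_usec * 1000ULL;
    }

    /* returns a starting date in ns: monotonic when available, wall-clock otherwise */
    static ullong initial_now_ns(void)
    {
            struct timespec ts;
            struct timeval tv;
            ullong mono = 0;

            if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                    mono = (ullong)ts.tv_sec * 1000000000ULL + (ullong)ts.tv_nsec;

            if (!mono) {            /* CLOCK_MONOTONIC missing (or read exactly zero) */
                    gettimeofday(&tv, NULL);
                    return tv_to_ns(&tv);
            }
            return mono;
    }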
@@ -337,8 +338,8 @@ void clock_init_process_date(void)
 	 * match and continue from this shifted date.
 	 */
 	now_offset = sec_to_ns((uint)((uint)(-_global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
-	global_now_ns += now_offset;
-	now_ns = global_now_ns;
+	_global_now_ns += now_offset;
+	now_ns = _global_now_ns;
 	now_ms = ns_to_ms(now_ns);
 	/* correct for TICK_ETERNITY (0) */
 	if (now_ms == TICK_ETERNITY)
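The `now_offset` line shifts the internal date so that the 32-bit millisecond clock sits only `BOOT_TIME_WRAP_SEC` seconds away from wrapping, presumably so wrap handling is exercised right after boot rather than ~49.7 days later. The worked example below only illustrates that arithmetic; the value of `BOOT_TIME_WRAP_SEC` and the sample date are made up for the sketch.

    #include <stdio.h>

    typedef unsigned long long ullong;

    #define BOOT_TIME_WRAP_SEC 20U   /* assumed value, purely for illustration */

    static ullong sec_to_ns(unsigned int sec)
    {
            return (ullong)sec * 1000000000ULL;
    }

    int main(void)
    {
            unsigned int global_now_ms = 123456789U;                    /* pretend current ms date */
            unsigned int ms_to_wrap    = (unsigned int)-global_now_ms;  /* ms left before the 32-bit wrap */
            unsigned int offset_sec    = ms_to_wrap / 1000U - BOOT_TIME_WRAP_SEC;
            ullong       now_offset    = sec_to_ns(offset_sec);

            /* once now_offset is added to the internal date, the ms clock is only
             * ~BOOT_TIME_WRAP_SEC seconds away from wrapping */
            printf("now_offset = %llu ns, shifted ms date = %u\n",
                   now_offset, global_now_ms + offset_sec * 1000U);
            return 0;
    }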
@@ -347,6 +348,8 @@ void clock_init_process_date(void)
 
 	/* for now global_now_ms points to the process-local _global_now_ms */
 	global_now_ms = &_global_now_ms;
+	/* same goes for global_now_ns */
+	global_now_ns = &_global_now_ns;
 
 	th_ctx->idle_pct = 100;
 	clock_update_date(0, 1);
@@ -369,7 +372,7 @@ void clock_init_thread_date(void)
 	gettimeofday(&date, NULL);
 	after_poll = before_poll = date;
 
-	now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
+	now_ns = _HA_ATOMIC_LOAD(global_now_ns);
 	th_ctx->idle_pct = 100;
 	th_ctx->prev_cpu_time = now_cpu_time();
 	th_ctx->prev_mono_time = now_mono_time();