@@ -65,21 +65,16 @@ static void continuous_agg_refresh_execute(const ContinuousAggRefreshState *refr
                                            const InternalTimeRange *bucketed_refresh_window,
                                            const int32 chunk_id);
 static void log_refresh_window(int elevel, const ContinuousAgg *cagg,
-                               const InternalTimeRange *refresh_window, const char *msg,
+                               const InternalTimeRange *refresh_window,
                                ContinuousAggRefreshContext context);
 static void continuous_agg_refresh_execute_wrapper(const InternalTimeRange *bucketed_refresh_window,
                                                     const ContinuousAggRefreshContext context,
                                                     const long iteration, void *arg1_refresh,
                                                     void *arg2_chunk_id);
-static void update_merged_refresh_window(const InternalTimeRange *bucketed_refresh_window,
-                                         const ContinuousAggRefreshContext context,
-                                         const long iteration, void *arg1_merged_refresh_window,
-                                         void *arg2);
 static void continuous_agg_refresh_with_window(const ContinuousAgg *cagg,
                                                const InternalTimeRange *refresh_window,
                                                const InvalidationStore *invalidations,
-                                               int32 chunk_id, const bool do_merged_refresh,
-                                               const InternalTimeRange merged_refresh_window,
+                                               int32 chunk_id,
                                                const ContinuousAggRefreshContext context);
 static void emit_up_to_date_notice(const ContinuousAgg *cagg,
                                    const ContinuousAggRefreshContext context);
@@ -428,8 +423,9 @@ continuous_agg_refresh_execute(const ContinuousAggRefreshState *refresh,
 
 static void
 log_refresh_window(int elevel, const ContinuousAgg *cagg, const InternalTimeRange *refresh_window,
-                   const char *msg, ContinuousAggRefreshContext context)
+                   ContinuousAggRefreshContext context)
 {
+	const char *msg = "continuous aggregate refresh (individual invalidation) on";
 	if (context.callctx == CAGG_REFRESH_POLICY_BATCHED)
 		elog(elevel,
 		     "%s \"%s\" in window [ %s, %s ] (batch %d of %d)",
@@ -463,34 +459,10 @@ continuous_agg_refresh_execute_wrapper(const InternalTimeRange *bucketed_refresh
 	const int32 chunk_id = *(const int32 *) arg2_chunk_id;
 	(void) iteration;
 
-	log_refresh_window(CAGG_REFRESH_LOG_LEVEL,
-	                   &refresh->cagg,
-	                   bucketed_refresh_window,
-	                   "continuous aggregate refresh (individual invalidation) on",
-	                   context);
+	log_refresh_window(CAGG_REFRESH_LOG_LEVEL, &refresh->cagg, bucketed_refresh_window, context);
 	continuous_agg_refresh_execute(refresh, bucketed_refresh_window, chunk_id);
 }
 
-static void
-update_merged_refresh_window(const InternalTimeRange *bucketed_refresh_window,
-                             const ContinuousAggRefreshContext context, const long iteration,
-                             void *arg1_merged_refresh_window, void *arg2)
-{
-	InternalTimeRange *merged_refresh_window = (InternalTimeRange *) arg1_merged_refresh_window;
-	(void) arg2;
-
-	if (iteration == 0)
-		*merged_refresh_window = *bucketed_refresh_window;
-	else
-	{
-		if (bucketed_refresh_window->start < merged_refresh_window->start)
-			merged_refresh_window->start = bucketed_refresh_window->start;
-
-		if (bucketed_refresh_window->end > merged_refresh_window->end)
-			merged_refresh_window->end = bucketed_refresh_window->end;
-	}
-}
-
 static long
 continuous_agg_scan_refresh_window_ranges(const ContinuousAgg *cagg,
                                           const InternalTimeRange *refresh_window,
@@ -572,8 +544,6 @@ static void
 continuous_agg_refresh_with_window(const ContinuousAgg *cagg,
                                    const InternalTimeRange *refresh_window,
                                    const InvalidationStore *invalidations, int32 chunk_id,
-                                   const bool do_merged_refresh,
-                                   const InternalTimeRange merged_refresh_window,
                                    const ContinuousAggRefreshContext context)
 {
 	ContinuousAggRefreshState refresh;
@@ -593,34 +563,15 @@ continuous_agg_refresh_with_window(const ContinuousAgg *cagg,
 	if (ContinuousAggIsFinalized(cagg))
 		chunk_id = INVALID_CHUNK_ID;
 
-	if (do_merged_refresh)
-	{
-		Assert(merged_refresh_window.type == refresh_window->type);
-		Assert(merged_refresh_window.start >= refresh_window->start);
-		Assert((cagg->bucket_function->bucket_fixed_interval == false) ||
-		       (merged_refresh_window.end -
-		            ts_continuous_agg_fixed_bucket_width(cagg->bucket_function) <=
-		        refresh_window->end));
-
-		log_refresh_window(CAGG_REFRESH_LOG_LEVEL,
-		                   cagg,
-		                   &merged_refresh_window,
-		                   "continuous aggregate refresh (merged invalidation) on",
-		                   context);
-		continuous_agg_refresh_execute(&refresh, &merged_refresh_window, chunk_id);
-	}
-	else
-	{
-		long count pg_attribute_unused();
-		count = continuous_agg_scan_refresh_window_ranges(cagg,
-		                                                  refresh_window,
-		                                                  invalidations,
-		                                                  context,
-		                                                  continuous_agg_refresh_execute_wrapper,
-		                                                  (void *) &refresh /* arg1 */,
-		                                                  (void *) &chunk_id /* arg2 */);
-		Assert(count);
-	}
+	long count pg_attribute_unused();
+	count = continuous_agg_scan_refresh_window_ranges(cagg,
+	                                                  refresh_window,
+	                                                  invalidations,
+	                                                  context,
+	                                                  continuous_agg_refresh_execute_wrapper,
+	                                                  (void *) &refresh /* arg1 */,
+	                                                  (void *) &chunk_id /* arg2 */);
+	Assert(count);
 }
 
 #define REFRESH_FUNCTION_NAME "refresh_continuous_aggregate()"
@@ -701,24 +652,6 @@ emit_up_to_date_notice(const ContinuousAgg *cagg, const ContinuousAggRefreshCont
 	}
 }
 
-void
-continuous_agg_calculate_merged_refresh_window(const ContinuousAgg *cagg,
-                                               const InternalTimeRange *refresh_window,
-                                               const InvalidationStore *invalidations,
-                                               InternalTimeRange *merged_refresh_window,
-                                               const ContinuousAggRefreshContext context)
-{
-	long count pg_attribute_unused();
-	count = continuous_agg_scan_refresh_window_ranges(cagg,
-	                                                  refresh_window,
-	                                                  invalidations,
-	                                                  context,
-	                                                  update_merged_refresh_window,
-	                                                  (void *) merged_refresh_window,
-	                                                  NULL /* arg2 */);
-	Assert(count);
-}
-
 static bool
 process_cagg_invalidations_and_refresh(const ContinuousAgg *cagg,
                                        const InternalTimeRange *refresh_window,
@@ -727,8 +660,6 @@ process_cagg_invalidations_and_refresh(const ContinuousAgg *cagg,
 {
 	InvalidationStore *invalidations;
 	Oid hyper_relid = ts_hypertable_id_to_relid(cagg->data.mat_hypertable_id, false);
-	bool do_merged_refresh = false;
-	InternalTimeRange merged_refresh_window;
 
 	/* Lock the continuous aggregate's materialized hypertable to protect
 	 * against concurrent refreshes. Only concurrent reads will be
@@ -742,12 +673,10 @@ process_cagg_invalidations_and_refresh(const ContinuousAgg *cagg,
 	invalidations = invalidation_process_cagg_log(cagg,
 	                                              refresh_window,
 	                                              ts_guc_cagg_max_individual_materializations,
-	                                              &do_merged_refresh,
-	                                              &merged_refresh_window,
 	                                              context,
 	                                              force);
 
-	if (invalidations != NULL || do_merged_refresh)
+	if (invalidations != NULL)
 	{
 		if (context.callctx == CAGG_REFRESH_CREATION)
 		{
@@ -758,13 +687,7 @@ process_cagg_invalidations_and_refresh(const ContinuousAgg *cagg,
 			                 "aggregate on creation.")));
 		}
 
-		continuous_agg_refresh_with_window(cagg,
-		                                   refresh_window,
-		                                   invalidations,
-		                                   chunk_id,
-		                                   do_merged_refresh,
-		                                   merged_refresh_window,
-		                                   context);
+		continuous_agg_refresh_with_window(cagg, refresh_window, invalidations, chunk_id, context);
 		if (invalidations)
 			invalidation_store_free(invalidations);
 		return true;
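For reference, after this change every refresh goes through the per-range path: `continuous_agg_scan_refresh_window_ranges()` walks the bucketed invalidation ranges and calls `continuous_agg_refresh_execute_wrapper()` once per range. The sketch below is a minimal, self-contained illustration of that callback-over-ranges pattern only; `TimeRange`, `range_callback`, `scan_ranges`, and `refresh_one_range` are simplified stand-ins invented for this example, not the actual TimescaleDB types or API.

```c
#include <stdio.h>

/* Simplified stand-in for InternalTimeRange (the real struct also carries a type Oid). */
typedef struct TimeRange
{
	long start;
	long end;
} TimeRange;

/* Callback shape mirroring continuous_agg_refresh_execute_wrapper: one bucketed
 * range per invocation plus two opaque arguments threaded through by the scanner. */
typedef void (*range_callback)(const TimeRange *range, long iteration, void *arg1, void *arg2);

static void
refresh_one_range(const TimeRange *range, long iteration, void *arg1, void *arg2)
{
	const char *cagg_name = arg1;
	int chunk_id = *(int *) arg2;

	printf("refreshing %s over [%ld, %ld) (iteration %ld, chunk %d)\n",
	       cagg_name, range->start, range->end, iteration, chunk_id);
}

/* Hand each invalidated range to the callback; this is the shape of the retained
 * continuous_agg_scan_refresh_window_ranges() path after the merged-refresh removal. */
static long
scan_ranges(const TimeRange *ranges, long nranges, range_callback cb, void *arg1, void *arg2)
{
	for (long i = 0; i < nranges; i++)
		cb(&ranges[i], i, arg1, arg2);

	return nranges;
}

int
main(void)
{
	TimeRange invalidated[] = { { 0, 10 }, { 20, 30 } };
	char cagg_name[] = "metrics_cagg";
	int chunk_id = 42;

	long count = scan_ranges(invalidated, 2, refresh_one_range, cagg_name, &chunk_id);
	printf("refreshed %ld range(s)\n", count);
	return 0;
}
```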