@@ -7,7 +7,9 @@ package pgmodel
7
7
import (
8
8
"context"
9
9
"fmt"
10
+ "math"
10
11
"sort"
12
+ "strings"
11
13
"sync"
12
14
"sync/atomic"
13
15
"time"
@@ -116,18 +118,28 @@ type SampleInfoIterator struct {
116
118
sampleInfos []samplesInfo
117
119
sampleInfoIndex int
118
120
sampleIndex int
121
+ minSeen int64
119
122
}
120
123
121
124
// NewSampleInfoIterator is the constructor
122
125
func NewSampleInfoIterator () SampleInfoIterator {
123
- return SampleInfoIterator {sampleInfos : make ([]samplesInfo , 0 ), sampleIndex : - 1 , sampleInfoIndex : 0 }
126
+ si := SampleInfoIterator {sampleInfos : make ([]samplesInfo , 0 )}
127
+ si .ResetPosition ()
128
+ return si
124
129
}
125
130
126
131
// Append adds a sample info to the back of the iterator.
// It only grows the underlying slice; it does not change the current
// iteration position.
func (t *SampleInfoIterator) Append(s samplesInfo) {
	t.sampleInfos = append(t.sampleInfos, s)
}
130
135
136
+ //ResetPosition resets the iteration position to the beginning
137
+ func (t * SampleInfoIterator ) ResetPosition () {
138
+ t .sampleIndex = - 1
139
+ t .sampleInfoIndex = 0
140
+ t .minSeen = math .MaxInt64
141
+ }
142
+
131
143
// Next returns true if there is another row and makes the next row data
132
144
// available to Values(). When there are no more rows available or an error
133
145
// has occurred it returns false.
@@ -149,6 +161,9 @@ func (t *SampleInfoIterator) Values() ([]interface{}, error) {
149
161
sample .Value ,
150
162
info .seriesID ,
151
163
}
164
+ if t .minSeen > sample .Timestamp {
165
+ t .minSeen = sample .Timestamp
166
+ }
152
167
return row , nil
153
168
}
154
169
@@ -390,6 +405,7 @@ type insertHandler struct {
390
405
pending * pendingBuffer
391
406
seriesCache map [string ]SeriesID
392
407
metricTableName string
408
+ metricName string
393
409
}
394
410
395
411
type pendingBuffer struct {
@@ -480,6 +496,7 @@ func runInserterRoutine(conn pgxConn, input chan insertDataRequest, metricName s
480
496
pending : & pendingBuffer {make ([]insertDataTask , 0 ), NewSampleInfoIterator ()},
481
497
seriesCache : make (map [string ]SeriesID ),
482
498
metricTableName : tableName ,
499
+ metricName : metricName ,
483
500
}
484
501
485
502
for {
@@ -560,6 +577,40 @@ func (h *insertHandler) flush() {
560
577
h .flushPending (h .pending )
561
578
}
562
579
580
+ func (h * insertHandler ) decompressChunks (pending * pendingBuffer ) error {
581
+ log .Warn ("msg" , fmt .Sprintf ("Table %s was compressed, decompressing" , h .metricTableName ), "table" , h .metricTableName , "metric" , h .metricName )
582
+ minTime := model .Time (pending .batch .minSeen ).Time ()
583
+
584
+ //how much faster are we at ingestion than wall-clock time?
585
+ ingestSpeedup := 2
586
+ //delay the next compression job proportional to the duration between now and the data time + a constant safety
587
+ delayBy := (time .Since (minTime ) / time .Duration (ingestSpeedup )) + time .Duration (60 * time .Minute )
588
+ maxDelayBy := time .Hour * 24
589
+ if delayBy > maxDelayBy {
590
+ delayBy = maxDelayBy
591
+ }
592
+
593
+ _ , rescheduleErr := h .conn .Exec (context .Background (),
594
+ `SELECT alter_job_schedule(
595
+ (SELECT job_id
596
+ FROM _timescaledb_config.bgw_policy_compress_chunks p
597
+ INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = p.hypertable_id)
598
+ WHERE h.schema_name = $1 and h.table_name = $2),
599
+ next_start=>$3)` , dataSchema , h .metricTableName , time .Now ().Add (delayBy ))
600
+ if rescheduleErr != nil {
601
+ log .Error ("msg" , rescheduleErr , "context" , "Rescheduling compression" )
602
+ return rescheduleErr
603
+ }
604
+
605
+ _ , decompressErr := h .conn .Exec (context .Background (), "CALL " + catalogSchema + ".decompress_chunks_after($1, $2);" , h .metricTableName , minTime )
606
+ if decompressErr != nil {
607
+ log .Error ("msg" , decompressErr , "context" , "Decompressing chunks" )
608
+ return decompressErr
609
+ }
610
+
611
+ return nil
612
+ }
613
+
563
614
func (h * insertHandler ) flushPending (pending * pendingBuffer ) {
564
615
err := func () error {
565
616
_ , err := h .setSeriesIds (pending .batch .sampleInfos )
@@ -573,6 +624,23 @@ func (h *insertHandler) flushPending(pending *pendingBuffer) {
573
624
copyColumns ,
574
625
& pending .batch ,
575
626
)
627
+ if err != nil {
628
+ if pgErr , ok := err .(* pgconn.PgError ); ok && strings .Contains (pgErr .Message , "insert/update/delete not permitted" ) {
629
+ /* If the error was that the table is already compressed, decompress and try again. */
630
+ decompressErr := h .decompressChunks (pending )
631
+ if decompressErr != nil {
632
+ return fmt .Errorf ("Error while decompressing. Decompression error: %v, Original Error: %w" , decompressErr , err )
633
+ }
634
+
635
+ pending .batch .ResetPosition ()
636
+ _ , err = h .conn .CopyFrom (
637
+ context .Background (),
638
+ pgx.Identifier {dataSchema , h .metricTableName },
639
+ copyColumns ,
640
+ & pending .batch ,
641
+ )
642
+ }
643
+ }
576
644
return err
577
645
}()
578
646
@@ -592,7 +660,8 @@ func (h *insertHandler) flushPending(pending *pendingBuffer) {
592
660
// nil all pointers to prevent memory leaks
593
661
pending .batch .sampleInfos [i ] = samplesInfo {}
594
662
}
595
- pending .batch = SampleInfoIterator {sampleInfos : pending .batch .sampleInfos [:0 ], sampleIndex : - 1 , sampleInfoIndex : 0 }
663
+ pending .batch = SampleInfoIterator {sampleInfos : pending .batch .sampleInfos [:0 ]}
664
+ pending .batch .ResetPosition ()
596
665
}
597
666
598
667
func (h * insertHandler ) setSeriesIds (sampleInfos []samplesInfo ) (string , error ) {
0 commit comments