This repository was archived by the owner on Apr 2, 2024. It is now read-only.

Fix skipping compression of first to last chunk #1081

Merged 1 commit on Jan 27, 2022
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -19,6 +19,7 @@ We use the following categories for changes:

### Fixed
- Fix broken `promscale_packager` telemetry field for docker envs [#1077]
- Fix compression of old chunks thus reducing storage requirements [#1081]

## [0.8.0] - 2022-01-18

4 changes: 2 additions & 2 deletions pkg/migrations/migration_files_generated.go

Large diffs are not rendered by default.

7 changes: 4 additions & 3 deletions pkg/migrations/sql/idempotent/base.sql
@@ -2911,21 +2911,22 @@ BEGIN
 DECLARE
     chunk_schema_name name;
     chunk_table_name name;
+    chunk_range_end timestamptz;
     chunk_num INT;
 BEGIN
-    FOR chunk_schema_name, chunk_table_name, chunk_num IN
+    FOR chunk_schema_name, chunk_table_name, chunk_range_end, chunk_num IN
         SELECT
             chunk_schema,
             chunk_name,
+            range_end,
             row_number() OVER (ORDER BY range_end DESC)
         FROM timescaledb_information.chunks
         WHERE hypertable_schema = 'SCHEMA_DATA'
             AND hypertable_name = metric_table
             AND NOT is_compressed
-            AND range_end <= compress_before
         ORDER BY range_end ASC
     LOOP
-        CONTINUE WHEN chunk_num <= 1;
+        CONTINUE WHEN chunk_num <= 1 OR chunk_range_end > compress_before;
         PERFORM SCHEMA_CATALOG.compress_chunk_for_metric(metric_table, chunk_schema_name, chunk_table_name);
         COMMIT;
     END LOOP;
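Note on the fix: previously `range_end <= compress_before` sat in the `WHERE` clause, so `row_number()` ranked only chunks already old enough to compress, and `CONTINUE WHEN chunk_num <= 1` permanently skipped the newest of those. With the age check moved into the `CONTINUE` condition, `chunk_num = 1` refers to the actual most recent uncompressed chunk, and every older chunk past `compress_before` gets compressed. A minimal sketch of a query that mirrors the new selection logic, assuming a hypothetical `prom_data` / `cpu_usage` hypertable and a one-hour cutoff (placeholders, not part of this PR):

-- Lists uncompressed chunks and marks which ones the fixed loop would compress.
-- 'prom_data', 'cpu_usage' and the INTERVAL '1 hour' cutoff are placeholder values.
SELECT
    chunk_schema,
    chunk_name,
    range_end,
    row_number() OVER (ORDER BY range_end DESC) AS chunk_num,
    (row_number() OVER (ORDER BY range_end DESC) > 1
        AND range_end <= now() - INTERVAL '1 hour') AS would_compress
FROM timescaledb_information.chunks
WHERE hypertable_schema = 'prom_data'
  AND hypertable_name = 'cpu_usage'
  AND NOT is_compressed
ORDER BY range_end ASC;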
13 changes: 13 additions & 0 deletions pkg/tests/end_to_end_tests/create_test.go
@@ -1233,6 +1233,19 @@ func TestCustomCompressionJob(t *testing.T) {
 		if chunkIsCompressed("1970-03-01 00:00:00.001+00") {
 			t.Error("third chunk compressed too soon")
 		}
+
+		// Add a chunk at the current time
+		insert = fmt.Sprintf(`INSERT INTO prom_data."%s" VALUES (NOW(), 0.1, 1);`, tableName)
+		_, err = db.Exec(context.Background(), insert)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		runCompressionJob()
+		// third chunk should be compressed since it's not the last chunk anymore
+		if !chunkIsCompressed("1970-03-01 00:00:00.001+00") {
+			t.Error("third chunk not compressed when it should have been")
+		}
 	})
 }
