Skip to content

Commit 3dcae44

Browse files
committed
Minor: simplify logging some more and log js-runtime thread errors.
1 parent 7730faf commit 3dcae44

File tree

2 files changed

+54
-106
lines changed

trailbase-core/src/js/runtime.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -303,9 +303,9 @@ impl RuntimeSingleton {
303303
})
304304
.collect();
305305

306-
for thread in threads {
307-
if thread.join().is_err() {
308-
log::error!("Failed to join worker");
306+
for (idx, thread) in threads.into_iter().enumerate() {
307+
if let Err(err) = thread.join() {
308+
log::error!("Failed to join worker: {idx}: {err:?}");
309309
}
310310
}
311311
});

trailbase-core/src/logging.rs

Lines changed: 51 additions & 103 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ use axum_client_ip::InsecureClientIp;
55
use log::*;
66
use serde::{Deserialize, Serialize};
77
use serde_json::json;
8-
use std::collections::BTreeMap;
98
use std::time::Duration;
109
use tracing::field::Field;
1110
use tracing::span::{Attributes, Id, Record, Span};
@@ -27,15 +26,16 @@ use crate::AppState;
2726
// * We have a period task to wipe logs past their retention.
2827
//
2928
#[repr(i32)]
30-
#[derive(Debug, Clone, Serialize, Deserialize)]
29+
#[derive(Debug, Clone, Deserialize, Serialize)]
3130
enum LogType {
3231
Undefined = 0,
3332
AdminRequest = 1,
3433
HttpRequest = 2,
3534
RecordApiRequest = 3,
3635
}
3736

38-
#[derive(Debug, Clone, Serialize, Deserialize)]
37+
/// DB schema representation.
38+
#[derive(Debug, Clone, Deserialize, Serialize)]
3939
pub(crate) struct Log {
4040
pub id: Option<[u8; 16]>,
4141
pub created: Option<f64>,
@@ -82,14 +82,6 @@ pub(super) fn sqlite_logger_on_request(_req: &Request<Body>, _span: &Span) {
8282
// request related information into the span
8383
}
8484

85-
#[inline]
86-
fn as_millis_f64(d: &Duration) -> f64 {
87-
const NANOS_PER_MILLI: f64 = 1_000_000.0;
88-
const MILLIS_PER_SEC: u64 = 1_000;
89-
return (d.as_secs() as f64) * (MILLIS_PER_SEC as f64)
90-
+ (d.subsec_nanos() as f64) / (NANOS_PER_MILLI);
91-
}
92-
9385
pub(super) fn sqlite_logger_on_response(
9486
response: &Response<Body>,
9587
latency: Duration,
@@ -121,7 +113,7 @@ impl SqliteLogLayer {
121113
//
122114
// TODO: should we use a bounded receiver to create back-pressure?
123115
// TODO: We could use recv_many() and batch insert.
124-
fn write_log(&self, log: Log) -> Result<(), trailbase_sqlite::Error> {
116+
fn write_log(&self, log: LogFieldStorage) -> Result<(), trailbase_sqlite::Error> {
125117
return self.conn.call_and_forget(move |conn| {
126118
let mut stmt = match conn.prepare_cached(
127119
r#"
@@ -139,24 +131,23 @@ impl SqliteLogLayer {
139131
};
140132

141133
if let Err(err) = stmt.execute(rusqlite::params!(
142-
log.r#type,
134+
// "type": FIXME: should be: admin, records, auth, other request
135+
LogType::HttpRequest as i32,
143136
log.level,
144137
log.status,
145138
log.method,
146-
log.url,
147-
log.latency,
139+
log.uri,
140+
log.latency_ms,
148141
log.client_ip,
149142
log.referer,
150-
log.user_agent
143+
log.user_agent // FIXME: we're not writing the JSON data.
151144
)) {
152145
warn!("logs writing failed: {err}");
153146
}
154147
});
155148
}
156149
}
157150

158-
const BUG_TEXT: &str = "Span not found, this is a bug";
159-
160151
impl<S> Layer<S> for SqliteLogLayer
161152
where
162153
S: tracing::Subscriber,
@@ -176,60 +167,27 @@ where
176167
let mut extensions = span.extensions_mut();
177168
if let Some(storage) = extensions.get_mut::<LogFieldStorage>() {
178169
values.record(&mut LogJsonVisitor(storage));
170+
} else {
171+
info!("logs already consumed");
179172
}
180173
}
181174

182175
fn on_event(&self, event: &tracing::Event<'_>, ctx: Context<'_, S>) {
183-
let Some(span) = ctx.event_span(event) else {
184-
return;
185-
};
176+
let span = ctx.event_span(event).expect(BUG_TEXT);
186177

187178
let mut extensions = span.extensions_mut();
188-
if let Some(s) = extensions.get_mut::<LogFieldStorage>() {
189-
let mut storage = std::mem::take(s);
190-
179+
if let Some(mut storage) = extensions.remove::<LogFieldStorage>() {
191180
event.record(&mut LogJsonVisitor(&mut storage));
192181

193-
let log = Log {
194-
id: None,
195-
created: None,
196-
// FIXME: Is it a admin/records/auth,plain http request...?
197-
// Or should this even be here? Couldn't we just infer client-side by prefix?
198-
r#type: LogType::HttpRequest as i32,
199-
level: level_to_int(event.metadata().level()),
200-
status: storage.status as u16,
201-
method: storage.method,
202-
url: storage.uri,
203-
latency: storage.latency_ms,
204-
client_ip: storage.client_ip,
205-
referer: storage.referer,
206-
user_agent: storage.user_agent,
207-
data: {
208-
if storage.fields.is_empty() {
209-
None
210-
} else {
211-
Some(json!(storage.fields))
212-
}
213-
},
214-
};
182+
storage.level = level_to_int(event.metadata().level());
215183

216-
if let Err(err) = self.write_log(log) {
184+
if let Err(err) = self.write_log(storage) {
217185
warn!("Failed to send to logs to writer: {err}");
218186
}
219187
}
220188
}
221189
}
222190

223-
fn level_to_int(level: &tracing::Level) -> i32 {
224-
match *level {
225-
tracing::Level::TRACE => 4,
226-
tracing::Level::DEBUG => 3,
227-
tracing::Level::INFO => 2,
228-
tracing::Level::WARN => 1,
229-
tracing::Level::ERROR => 0,
230-
}
231-
}
232-
233191
#[derive(Debug, Default, Clone)]
234192
struct LogFieldStorage {
235193
// Request fields/properties.
@@ -241,13 +199,16 @@ struct LogFieldStorage {
241199
user_agent: String,
242200
version: String,
243201

202+
// Log level.
203+
level: i64,
204+
244205
// Response fields/properties
245206
status: u64,
246207
latency_ms: f64,
247208
length: i64,
248209

249210
// All other fields.
250-
fields: BTreeMap<&'static str, serde_json::Value>,
211+
fields: serde_json::Map<String, serde_json::Value>,
251212
}
252213

253214
struct LogJsonVisitor<'a>(&'a mut LogFieldStorage);
@@ -257,7 +218,7 @@ impl tracing::field::Visit for LogJsonVisitor<'_> {
257218
match field.name() {
258219
"latency_ms" => self.0.latency_ms = double,
259220
name => {
260-
self.0.fields.insert(name, json!(double));
221+
self.0.fields.insert(name.into(), double.into());
261222
}
262223
};
263224
}
@@ -266,7 +227,7 @@ impl tracing::field::Visit for LogJsonVisitor<'_> {
266227
match field.name() {
267228
"length" => self.0.length = int,
268229
name => {
269-
self.0.fields.insert(name, json!(int));
230+
self.0.fields.insert(name.into(), int.into());
270231
}
271232
};
272233
}
@@ -275,13 +236,13 @@ impl tracing::field::Visit for LogJsonVisitor<'_> {
275236
match field.name() {
276237
"status" => self.0.status = uint,
277238
name => {
278-
self.0.fields.insert(name, json!(uint));
239+
self.0.fields.insert(name.into(), uint.into());
279240
}
280241
};
281242
}
282243

283244
fn record_bool(&mut self, field: &Field, b: bool) {
284-
self.0.fields.insert(field.name(), json!(b));
245+
self.0.fields.insert(field.name().into(), b.into());
285246
}
286247

287248
fn record_str(&mut self, field: &Field, s: &str) {
@@ -291,7 +252,7 @@ impl tracing::field::Visit for LogJsonVisitor<'_> {
291252
"referer" => self.0.referer = s.to_string(),
292253
"user_agent" => self.0.user_agent = s.to_string(),
293254
name => {
294-
self.0.fields.insert(name, json!(s));
255+
self.0.fields.insert(name.into(), s.into());
295256
}
296257
};
297258
}
@@ -303,57 +264,44 @@ impl tracing::field::Visit for LogJsonVisitor<'_> {
303264
"uri" => self.0.uri = v,
304265
"version" => self.0.version = v,
305266
name => {
306-
self.0.fields.insert(name, json!(v));
267+
self.0.fields.insert(name.into(), v.into());
307268
}
308269
};
309270
}
310271

311272
fn record_error(&mut self, field: &Field, err: &(dyn std::error::Error + 'static)) {
312-
self.0.fields.insert(field.name(), json!(err.to_string()));
273+
self
274+
.0
275+
.fields
276+
.insert(field.name().into(), json!(err.to_string()));
313277
}
314278
}
315279

316-
// #[derive(Debug)]
317-
// struct CustomFieldStorage(BTreeMap<String, serde_json::Value>);
318-
//
319-
// struct JsonVisitor<'a>(&'a mut BTreeMap<String, serde_json::Value>);
320-
//
321-
// impl<'a> tracing::field::Visit for JsonVisitor<'a> {
322-
// fn record_f64(&mut self, field: &Field, double: f64) {
323-
// self.0.insert(field.name().to_string(), json!(double));
324-
// }
325-
//
326-
// fn record_i64(&mut self, field: &Field, int: i64) {
327-
// self.0.insert(field.name().to_string(), json!(int));
328-
// }
329-
//
330-
// fn record_u64(&mut self, field: &Field, uint: u64) {
331-
// self.0.insert(field.name().to_string(), json!(uint));
332-
// }
333-
//
334-
// fn record_bool(&mut self, field: &Field, b: bool) {
335-
// self.0.insert(field.name().to_string(), json!(b));
336-
// }
337-
//
338-
// fn record_str(&mut self, field: &Field, s: &str) {
339-
// self.0.insert(field.name().to_string(), json!(s));
340-
// }
341-
//
342-
// fn record_error(&mut self, field: &Field, err: &(dyn std::error::Error + 'static)) {
343-
// self
344-
// .0
345-
// .insert(field.name().to_string(), json!(err.to_string()));
346-
// }
347-
//
348-
// fn record_debug(&mut self, field: &Field, dbg: &dyn std::fmt::Debug) {
349-
// self
350-
// .0
351-
// .insert(field.name().to_string(), json!(format!("{:?}", dbg)));
352-
// }
353-
// }
280+
#[inline]
281+
fn as_millis_f64(d: &Duration) -> f64 {
282+
const NANOS_PER_MILLI: f64 = 1_000_000.0;
283+
const MILLIS_PER_SEC: u64 = 1_000;
284+
285+
return (d.as_secs() as f64) * (MILLIS_PER_SEC as f64)
286+
+ (d.subsec_nanos() as f64) / (NANOS_PER_MILLI);
287+
}
354288

289+
#[inline]
355290
fn get_header<'a>(headers: &'a HeaderMap, header_name: &'static str) -> Option<&'a str> {
356291
headers
357292
.get(header_name)
358293
.and_then(|header_value| header_value.to_str().ok())
359294
}
295+
296+
#[inline]
297+
fn level_to_int(level: &tracing::Level) -> i64 {
298+
match *level {
299+
tracing::Level::TRACE => 4,
300+
tracing::Level::DEBUG => 3,
301+
tracing::Level::INFO => 2,
302+
tracing::Level::WARN => 1,
303+
tracing::Level::ERROR => 0,
304+
}
305+
}
306+
307+
const BUG_TEXT: &str = "Span not found, this is a bug";

0 commit comments

Comments
 (0)