Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

feat: progress logging #290

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/client/src/blob.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ pub mod create;
pub mod decompress;
pub mod download;
pub mod read;
pub mod read_progress;

/// A blob kind.
#[derive(Clone, Copy, Debug)]
Expand Down
12 changes: 12 additions & 0 deletions packages/client/src/blob/read.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
use crate::{
self as tg,
blob::read_progress::ProgressReader,
handle::Ext as _,
util::serde::{BytesBase64, SeekFromString},
Client,
Expand Down Expand Up @@ -82,6 +83,13 @@ impl tg::Blob {
Reader::new(handle, self.clone()).await
}

/// Create a [`ProgressReader`] over this blob's contents.
///
/// The returned reader exposes a shared atomic position counter that can be
/// polled from another task to report read progress.
///
/// # Errors
///
/// Returns an error if the underlying blob reader cannot be created.
pub async fn progress_reader<H>(&self, handle: &H) -> tg::Result<ProgressReader<H>>
where
	H: tg::Handle,
{
	// Propagate the error instead of unwrapping: creating the reader performs
	// I/O against the handle and can legitimately fail.
	ProgressReader::new(self.reader(handle).await?)
}

pub async fn bytes<H>(&self, handle: &H) -> tg::Result<Vec<u8>>
where
H: tg::Handle,
Expand Down Expand Up @@ -125,6 +133,10 @@ where
self.position
}

/// The total size of the blob in bytes.
pub fn size(&self) -> u64 {
	self.size
}

/// Whether the reader has reached the end of the blob,
/// i.e. the current position equals the blob's size.
pub fn end(&self) -> bool {
	self.position == self.size
}
Expand Down
85 changes: 85 additions & 0 deletions packages/client/src/blob/read_progress.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
use std::sync::atomic::AtomicU64;

use crate as tg;
use crate::blob::Reader;
use std::pin::Pin;
use std::sync::{atomic::Ordering, Arc};
use tokio::io::{AsyncBufRead, AsyncRead, AsyncSeek};

/// A wrapper around a blob [`Reader`] that tracks how many bytes have been
/// consumed, exposing the count through a shared atomic so another task can
/// report progress while the reader is in use.
pub struct ProgressReader<H> {
	// The wrapped blob reader.
	inner: Reader<H>,
	// Bytes consumed so far, shared with progress-logging tasks via `position()`.
	position: Arc<AtomicU64>,
}

impl<H> ProgressReader<H>
where
	H: tg::Handle,
{
	/// Wrap `inner`, starting the progress counter at zero.
	pub fn new(inner: Reader<H>) -> tg::Result<ProgressReader<H>> {
		let position = Arc::new(AtomicU64::new(0));
		Ok(Self { inner, position })
	}

	/// A shared handle to the progress counter. Clones are cheap (refcount
	/// bump) and observe the same underlying count.
	pub fn position(&self) -> Arc<AtomicU64> {
		Arc::clone(&self.position)
	}

	/// The total size in bytes of the underlying blob.
	pub fn size(&self) -> u64 {
		self.inner.size()
	}
}

impl<H> AsyncRead for ProgressReader<H>
where
H: tg::Handle,
{
fn poll_read(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> std::task::Poll<std::io::Result<()>> {
let this = self.get_mut();
let poll_result = Pin::new(&mut this.inner).poll_read(cx, buf);
if let std::task::Poll::Ready(Ok(())) = &poll_result {
let read_bytes = buf.filled().len() as u64;
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is ugly because we also keep track of progress in consume() below. In practice, we always use one or the other in extract/(de)compress. Taking suggestions on how to improve this.

this.position.fetch_add(read_bytes, Ordering::Relaxed);
}
poll_result
}
}

impl<H> AsyncSeek for ProgressReader<H>
where
H: tg::Handle,
{
fn start_seek(self: Pin<&mut Self>, position: std::io::SeekFrom) -> std::io::Result<()> {
Pin::new(&mut self.get_mut().inner).start_seek(position)
}

fn poll_complete(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<std::io::Result<u64>> {
Pin::new(&mut self.get_mut().inner).poll_complete(cx)
}
}

impl<H> AsyncBufRead for ProgressReader<H>
where
	H: tg::Handle,
{
	/// Forward buffer filling to the inner reader. Filling the buffer does not
	/// advance the progress counter; only `consume` does.
	fn poll_fill_buf(
		self: Pin<&mut Self>,
		cx: &mut std::task::Context<'_>,
	) -> std::task::Poll<std::io::Result<&[u8]>> {
		Pin::new(&mut self.get_mut().inner).poll_fill_buf(cx)
	}

	/// Consume `amt` bytes from the inner reader and add them to the progress
	/// counter.
	// NOTE(review): progress is counted both here and in `poll_read`. A caller
	// that mixes `AsyncRead` and `AsyncBufRead` access would double-count;
	// presumably each consumer uses exactly one of the two interfaces — confirm.
	fn consume(self: Pin<&mut Self>, amt: usize) {
		let this = self.get_mut();
		Pin::new(&mut this.inner).consume(amt);
		this.position.fetch_add(amt as u64, Ordering::Relaxed);
	}
}
51 changes: 49 additions & 2 deletions packages/server/src/runtime/builtin/compress.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,16 @@
use super::Runtime;
use byte_unit::Byte;
use num::ToPrimitive;
use std::pin::Pin;
use std::time::Duration;
use tangram_client as tg;
use tokio::io::AsyncRead;

impl Runtime {
pub async fn compress(
&self,
build: &tg::Build,
_remote: Option<String>,
remote: Option<String>,
) -> tg::Result<tg::Value> {
let server = &self.server;

Expand Down Expand Up @@ -36,8 +39,42 @@ impl Runtime {
.parse::<tg::blob::compress::Format>()
.map_err(|source| tg::error!(!source, "invalid format"))?;

let reader = blob.progress_reader(server).await?;
// Spawn a task to log progress.
let compressed = reader.position();
let content_length = reader.size();
let log_task = tokio::spawn({
let server = server.clone();
let build = build.clone();
let remote = remote.clone();
async move {
loop {
let compressed = compressed.load(std::sync::atomic::Ordering::Relaxed);
let percent =
100.0 * compressed.to_f64().unwrap() / content_length.to_f64().unwrap();
let compressed = Byte::from_u64(compressed);
let content_length = Byte::from_u64(content_length);
let message = format!(
"compressing: {compressed:#} of {content_length:#} {percent:.2}%\n"
);
let arg = tg::build::log::post::Arg {
bytes: message.into(),
remote: remote.clone(),
};
let result = build.add_log(&server, arg).await;
if result.is_err() {
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
});
let log_task_abort_handle = log_task.abort_handle();
scopeguard::defer! {
log_task_abort_handle.abort();
};

// Compress the blob.
let reader = blob.reader(server).await?;
let reader: Pin<Box<dyn AsyncRead + Send + 'static>> = match format {
tg::blob::compress::Format::Bz2 => {
Box::pin(async_compression::tokio::bufread::BzEncoder::new(reader))
Expand All @@ -54,6 +91,16 @@ impl Runtime {
};
let blob = tg::Blob::with_reader(server, reader).await?;

log_task.abort();

// Log that the compression finished.
let message = format!("finished compressing\n");
let arg = tg::build::log::post::Arg {
bytes: message.into(),
remote: remote.clone(),
};
build.add_log(server, arg).await.ok();

Ok(blob.into())
}
}
52 changes: 50 additions & 2 deletions packages/server/src/runtime/builtin/decompress.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,16 @@
use super::Runtime;
use byte_unit::Byte;
use num::ToPrimitive;
use std::pin::Pin;
use std::time::Duration;
use tangram_client as tg;
use tokio::io::AsyncRead;

impl Runtime {
pub async fn decompress(
&self,
build: &tg::Build,
_remote: Option<String>,
remote: Option<String>,
) -> tg::Result<tg::Value> {
let server = &self.server;

Expand Down Expand Up @@ -36,8 +39,42 @@ impl Runtime {
.parse::<tg::blob::compress::Format>()
.map_err(|source| tg::error!(!source, "invalid format"))?;

let reader = blob.progress_reader(server).await?;
// Spawn a task to log progress.
let decompressed = reader.position();
let content_length = reader.size();
let log_task = tokio::spawn({
let server = server.clone();
let build = build.clone();
let remote = remote.clone();
async move {
loop {
let decompressed = decompressed.load(std::sync::atomic::Ordering::Relaxed);
let percent =
100.0 * decompressed.to_f64().unwrap() / content_length.to_f64().unwrap();
let decompressed = Byte::from_u64(decompressed);
let content_length = Byte::from_u64(content_length);
let message = format!(
"decompressing: {decompressed:#} of {content_length:#} {percent:.2}%\n"
);
let arg = tg::build::log::post::Arg {
bytes: message.into(),
remote: remote.clone(),
};
let result = build.add_log(&server, arg).await;
if result.is_err() {
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
});
let log_task_abort_handle = log_task.abort_handle();
scopeguard::defer! {
log_task_abort_handle.abort();
};

// Decompress the blob.
let reader = blob.reader(server).await?;
let reader: Pin<Box<dyn AsyncRead + Send + 'static>> = match format {
tg::blob::compress::Format::Bz2 => {
Box::pin(async_compression::tokio::bufread::BzDecoder::new(reader))
Expand All @@ -52,8 +89,19 @@ impl Runtime {
Box::pin(async_compression::tokio::bufread::ZstdDecoder::new(reader))
},
};

let blob = tg::Blob::with_reader(server, reader).await?;

log_task.abort();

// Log that the decompression finished.
let message = format!("finished decompressing\n");
let arg = tg::build::log::post::Arg {
bytes: message.into(),
remote: remote.clone(),
};
build.add_log(server, arg).await.ok();

Ok(blob.into())
}
}
13 changes: 7 additions & 6 deletions packages/server/src/runtime/builtin/download.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use super::Runtime;
use byte_unit::Byte;
use futures::TryStreamExt as _;
use num::ToPrimitive as _;
use std::{
Expand Down Expand Up @@ -43,7 +44,7 @@ impl Runtime {
.await
.map_err(|source| tg::error!(!source, %url, "failed to perform the request"))?
.error_for_status()
.map_err(|source| tg::error!(!source, %url, "expected a sucess status"))?;
.map_err(|source| tg::error!(!source, %url, "expected a success status"))?;

// Spawn a task to log progress.
let downloaded = Arc::new(AtomicU64::new(0));
Expand All @@ -60,12 +61,12 @@ impl Runtime {
let message = if let Some(content_length) = content_length {
let percent =
100.0 * downloaded.to_f64().unwrap() / content_length.to_f64().unwrap();
let downloaded = byte_unit::Byte::from_u64(downloaded);
let content_length = byte_unit::Byte::from_u64(content_length);
format!("downloading from \"{url}\": {downloaded} of {content_length} {percent:.2}%\n")
let downloaded = Byte::from_u64(downloaded);
let content_length = Byte::from_u64(content_length);
format!("downloading from \"{url}\": {downloaded:#} of {content_length:#} {percent:.2}%\n")
} else {
let downloaded = byte_unit::Byte::from_u64(downloaded);
format!("downloading from \"{url}\": {downloaded}\n")
let downloaded = Byte::from_u64(downloaded);
format!("downloading from \"{url}\": {downloaded:#}\n")
};
let arg = tg::build::log::post::Arg {
bytes: message.into(),
Expand Down
48 changes: 46 additions & 2 deletions packages/server/src/runtime/builtin/extract.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,16 @@
use super::Runtime;
use crate::tmp::Tmp;
use byte_unit::Byte;
use num::ToPrimitive;
use std::time::Duration;
use tangram_client as tg;
use tokio_util::io::SyncIoBridge;

impl Runtime {
pub async fn extract(
&self,
build: &tg::Build,
_remote: Option<String>,
remote: Option<String>,
) -> tg::Result<tg::Value> {
let server = &self.server;

Expand Down Expand Up @@ -40,7 +43,38 @@ impl Runtime {
};

// Create the reader.
let reader = blob.reader(server).await?;
let reader = blob.progress_reader(server).await?;
let extracted = reader.position();
let content_length = reader.size();
let log_task = tokio::spawn({
let server = server.clone();
let build = build.clone();
let remote = remote.clone();
async move {
loop {
let extracted = extracted.load(std::sync::atomic::Ordering::Relaxed);
let percent =
100.0 * extracted.to_f64().unwrap() / content_length.to_f64().unwrap();
let extracted = Byte::from_u64(extracted);
let content_length = Byte::from_u64(content_length);
let message =
format!("extracting: {extracted:#} of {content_length:#} {percent:.2}%\n");
let arg = tg::build::log::post::Arg {
bytes: message.into(),
remote: remote.clone(),
};
let result = build.add_log(&server, arg).await;
if result.is_err() {
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
});
let log_task_abort_handle = log_task.abort_handle();
scopeguard::defer! {
log_task_abort_handle.abort();
};

// Create a temporary path.
let tmp = Tmp::new(server);
Expand Down Expand Up @@ -77,6 +111,16 @@ impl Runtime {
.await
.unwrap()?;

log_task.abort();

// Log that the extraction finished.
let message = format!("finished extracting\n");
let arg = tg::build::log::post::Arg {
bytes: message.into(),
remote: remote.clone(),
};
build.add_log(server, arg).await.ok();

// Check in the extracted artifact.
let arg = tg::artifact::checkin::Arg {
destructive: true,
Expand Down