diff --git a/.sqlx/query-27e9c3c8c3f7770a31967760fdce2ff0304cd9f28c3c8bc056c5b2a20952fc99.json b/.sqlx/query-27e9c3c8c3f7770a31967760fdce2ff0304cd9f28c3c8bc056c5b2a20952fc99.json
deleted file mode 100644
index 68b8f882c..000000000
--- a/.sqlx/query-27e9c3c8c3f7770a31967760fdce2ff0304cd9f28c3c8bc056c5b2a20952fc99.json
+++ /dev/null
@@ -1,46 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n            SELECT releases.features as \"features?: Vec<Feature>\"\n            FROM releases\n            INNER JOIN crates ON crates.id = releases.crate_id\n            WHERE crates.name = $1 AND releases.version = $2",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "features?: Vec<Feature>",
-        "type_info": {
-          "Custom": {
-            "name": "_feature",
-            "kind": {
-              "Array": {
-                "Custom": {
-                  "name": "feature",
-                  "kind": {
-                    "Composite": [
-                      [
-                        "name",
-                        "Text"
-                      ],
-                      [
-                        "subfeatures",
-                        "TextArray"
-                      ]
-                    ]
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Text",
-        "Text"
-      ]
-    },
-    "nullable": [
-      true
-    ]
-  },
-  "hash": "27e9c3c8c3f7770a31967760fdce2ff0304cd9f28c3c8bc056c5b2a20952fc99"
-}
diff --git a/.sqlx/query-72a4b5a8e046a7196d7f27baad5ed82e22ad7b5333749fae4c75e0b8e1066e7f.json b/.sqlx/query-72a4b5a8e046a7196d7f27baad5ed82e22ad7b5333749fae4c75e0b8e1066e7f.json
deleted file mode 100644
index 588312d6f..000000000
--- a/.sqlx/query-72a4b5a8e046a7196d7f27baad5ed82e22ad7b5333749fae4c75e0b8e1066e7f.json
+++ /dev/null
@@ -1,32 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n            SELECT c.name, r.version, r.release_time\n            FROM crates c, releases r\n            WHERE c.id = r.crate_id AND r.release_time IS NOT NULL\n            ORDER BY r.release_time DESC\n            ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "name",
-        "type_info": "Text"
-      },
-      {
-        "ordinal": 1,
-        "name": "version",
-        "type_info": "Text"
-      },
-      {
-        "ordinal": 2,
-        "name": "release_time",
-        "type_info": "Timestamptz"
-      }
-    ],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": [
-      false,
-      false,
-      true
-    ]
-  },
-  "hash": "72a4b5a8e046a7196d7f27baad5ed82e22ad7b5333749fae4c75e0b8e1066e7f"
-}
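
The two deleted `.sqlx` files are sqlx's offline-mode query cache: each JSON file stores the compile-time column and parameter metadata for one `sqlx::query!` string literal, keyed by the hash of that literal. The second file caches exactly the release-listing query whose `sqlx::query!` call is removed from `src/bin/cratesfyi.rs` below, so `cargo sqlx prepare` would no longer regenerate it. To illustrate what the metadata drives (a hedged sketch, not docs.rs code: the struct name is invented, and it assumes sqlx's chrono integration), the `"nullable": [false, false, true]` array is what makes only `release_time` an `Option` in the generated record:

```rust
use chrono::{DateTime, Utc};

// Hypothetical equivalent of the anonymous record type that
// sqlx::query! derives from the cached metadata above.
struct ReleaseRow {
    name: String,                        // "Text", nullable: false
    version: String,                     // "Text", nullable: false
    release_time: Option<DateTime<Utc>>, // "Timestamptz", nullable: true
}
```
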
diff --git a/src/bin/cratesfyi.rs b/src/bin/cratesfyi.rs
index d8b5d6407..0d8b46ca6 100644
--- a/src/bin/cratesfyi.rs
+++ b/src/bin/cratesfyi.rs
@@ -1,9 +1,9 @@
+use std::env;
 use std::fmt::Write;
 use std::net::SocketAddr;
 use std::path::PathBuf;
 use std::str::FromStr;
 use std::sync::Arc;
-use std::{env, fs};
 
 use anyhow::{anyhow, Context as _, Error, Result};
 use axum::async_trait;
@@ -11,10 +11,9 @@ use clap::{Parser, Subcommand, ValueEnum};
 use docs_rs::cdn::CdnBackend;
 use docs_rs::db::{self, add_path_into_database, Overrides, Pool, PoolClient};
 use docs_rs::repositories::RepositoryStatsUpdater;
-use docs_rs::storage::{rustdoc_archive_path, source_archive_path, PathNotFoundError};
 use docs_rs::utils::{
     get_config, get_crate_pattern_and_priority, list_crate_priorities, queue_builder,
-    remove_crate_priority, set_config, set_crate_priority, spawn_blocking, ConfigName,
+    remove_crate_priority, set_config, set_crate_priority, ConfigName,
 };
 use docs_rs::{
     start_background_metrics_webserver, start_web_server, AsyncStorage, BuildQueue, Config,
@@ -24,7 +23,6 @@
 use futures_util::StreamExt;
 use humantime::Duration;
 use once_cell::sync::OnceCell;
-use rusqlite::{Connection, OpenFlags};
 use sentry::TransactionContext;
 use tokio::runtime::{Builder, Runtime};
 use tracing_log::LogTracer;
@@ -511,9 +509,6 @@ enum DatabaseSubcommand {
     /// temporary command to update the `crates.latest_version_id` field
     UpdateLatestVersionId,
 
-    /// temporary command to rebuild a subset of the archive indexes
-    FixBrokenArchiveIndexes,
-
     /// Updates Github/Gitlab stats for crates.
     UpdateRepositoryFields,
 
@@ -572,99 +567,6 @@ impl DatabaseSubcommand {
                 .context("Failed to run database migrations")?
             }
 
-            Self::FixBrokenArchiveIndexes => {
-                let pool = ctx.pool()?;
-                let build_queue = ctx.build_queue()?;
-                ctx.runtime()?
-                    .block_on(async {
-                        async fn queue_rebuild(
-                            build_queue: Arc<BuildQueue>,
-                            name: &str,
-                            version: &str,
-                        ) -> Result<()> {
-                            spawn_blocking({
-                                let name = name.to_owned();
-                                let version = version.to_owned();
-                                move || {
-                                    if !build_queue.has_build_queued(&name, &version)? {
-                                        build_queue.add_crate(&name, &version, 5, None)?;
-                                    }
-                                    Ok(())
-                                }
-                            })
-                            .await
-                        }
-
-                        let storage = ctx.async_storage().await?;
-                        let mut conn = pool.get_async().await?;
-                        let mut result_stream = sqlx::query!(
-                            "
-                            SELECT c.name, r.version, r.release_time
-                            FROM crates c, releases r
-                            WHERE c.id = r.crate_id AND r.release_time IS NOT NULL
-                            ORDER BY r.release_time DESC
-                            "
-                        )
-                        .fetch(&mut *conn);
-
-                        while let Some(row) = result_stream.next().await {
-                            let row = row?;
-
-                            println!(
-                                "checking index for {} {} ({:?})",
-                                row.name, row.version, row.release_time
-                            );
-
-                            for path in &[
-                                rustdoc_archive_path(&row.name, &row.version),
-                                source_archive_path(&row.name, &row.version),
-                            ] {
-                                let local_archive_index_filename = match storage
-                                    .download_archive_index(path, 42)
-                                    .await
-                                {
-                                    Ok(path) => path,
-                                    Err(err)
-                                        if err.downcast_ref::<PathNotFoundError>().is_some() =>
-                                    {
-                                        continue
-                                    }
-                                    Err(err) => return Err(err),
-                                };
-
-                                let count = {
-                                    let connection = match Connection::open_with_flags(
-                                        &local_archive_index_filename,
-                                        OpenFlags::SQLITE_OPEN_READ_ONLY
-                                            | OpenFlags::SQLITE_OPEN_NO_MUTEX,
-                                    ) {
-                                        Ok(conn) => conn,
-                                        Err(err) => {
-                                            println!("... error opening sqlite db, queueing rebuild: {:?}", err);
-                                            queue_rebuild(build_queue.clone(), &row.name, &row.version).await?;
-                                            continue;
-                                        }
-                                    };
-                                    let mut stmt =
-                                        connection.prepare("SELECT count(*) FROM files")?;
-
-                                    stmt.query_row([], |row| Ok(row.get::<_, usize>(0)))??
-                                };
-
-                                fs::remove_file(&local_archive_index_filename)?;
-
-                                if count >= 65000 {
-                                    println!("...big index, queueing rebuild");
-                                    queue_rebuild(build_queue.clone(), &row.name, &row.version)
-                                        .await?;
-                                }
-                            }
-                        }
-
-                        Ok::<(), anyhow::Error>(())
-                    })
-                    .context("Failed to queue rebuilds for big documentation sizes")?
-            }
-
             Self::UpdateLatestVersionId => {
                 let pool = ctx.pool()?;
                 ctx.runtime()?
@@ -679,7 +581,7 @@ impl DatabaseSubcommand {
                 while let Some(row) = result_stream.next().await {
                     let row = row?;
 
-                    println!("handling crate {} ", row.name);
+                    println!("handling crate {}", row.name);
                     db::update_latest_version_id(&mut update_conn, row.id).await?;
                 }
 
diff --git a/src/build_queue.rs b/src/build_queue.rs
index 23068ac00..506acee9b 100644
--- a/src/build_queue.rs
+++ b/src/build_queue.rs
@@ -151,7 +151,7 @@ impl BuildQueue {
         .collect())
     }
 
-    pub fn has_build_queued(&self, name: &str, version: &str) -> Result<bool> {
+    pub(crate) fn has_build_queued(&self, name: &str, version: &str) -> Result<bool> {
         Ok(self
             .db
             .get()?
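
The removed `FixBrokenArchiveIndexes` subcommand iterated over every release (newest first), downloaded each release's rustdoc and source archive indexes, and queued a low-priority rebuild whenever an index could not be opened or listed 65000 or more files; that threshold sits just under the 65535-entry ceiling of non-zip64 ZIP archives, which appears to be the class of breakage it was hunting. Distilled to a standalone predicate (a hedged sketch using the same rusqlite calls as the deleted code; the function name is illustrative):

```rust
use anyhow::Result;
use rusqlite::{Connection, OpenFlags};

// True when a downloaded archive index should trigger a rebuild:
// either the SQLite file cannot be opened, or it lists >= 65000 files.
fn index_needs_rebuild(index_path: &str) -> Result<bool> {
    let conn = match Connection::open_with_flags(
        index_path,
        OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_NO_MUTEX,
    ) {
        Ok(conn) => conn,
        // An index that fails to open is itself broken.
        Err(_) => return Ok(true),
    };
    let count: usize = conn.query_row("SELECT count(*) FROM files", [], |row| row.get(0))?;
    Ok(count >= 65000)
}
```

With the one-off migration gone, `BuildQueue::has_build_queued` loses its only caller outside the crate, hence the `pub` to `pub(crate)` change above.
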
diff --git a/src/storage/mod.rs b/src/storage/mod.rs
index b6a770feb..a79cba14b 100644
--- a/src/storage/mod.rs
+++ b/src/storage/mod.rs
@@ -28,7 +28,7 @@ type FileRange = RangeInclusive<u64>;
 
 #[derive(Debug, thiserror::Error)]
 #[error("path not found")]
-pub struct PathNotFoundError;
+pub(crate) struct PathNotFoundError;
 
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub(crate) struct Blob {
@@ -304,7 +304,7 @@ impl AsyncStorage {
     }
 
     #[instrument]
-    pub async fn download_archive_index(
+    pub(super) async fn download_archive_index(
         &self,
         archive_path: &str,
         latest_build_id: i32,
@@ -823,11 +823,11 @@ fn detect_mime(file_path: impl AsRef<Path>) -> &'static str {
     }
 }
 
-pub fn rustdoc_archive_path(name: &str, version: &str) -> String {
+pub(crate) fn rustdoc_archive_path(name: &str, version: &str) -> String {
     format!("rustdoc/{name}/{version}.zip")
 }
 
-pub fn source_archive_path(name: &str, version: &str) -> String {
+pub(crate) fn source_archive_path(name: &str, version: &str) -> String {
     format!("sources/{name}/{version}.zip")
 }
 
diff --git a/src/utils/mod.rs b/src/utils/mod.rs
index 921ef00f0..b6fc8c926 100644
--- a/src/utils/mod.rs
+++ b/src/utils/mod.rs
@@ -110,7 +110,7 @@ where
 ///     })
 ///     .await?
 /// ```
-pub async fn spawn_blocking<F, R>(f: F) -> Result<R>
+pub(crate) async fn spawn_blocking<F, R>(f: F) -> Result<R>
 where
     F: FnOnce() -> Result<R> + Send + 'static,
     R: Send + 'static,
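
The visibility tightening in these last two files mirrors the import cleanup in `src/bin/cratesfyi.rs`: once the binary stops using `PathNotFoundError`, the archive-path helpers, and `spawn_blocking`, they can shrink to `pub(crate)` (and `download_archive_index` all the way to `pub(super)`), letting dead-code lints catch any future orphans. For context, a helper with `spawn_blocking`'s signature usually wraps tokio's blocking pool roughly like this (a sketch of the common shape, not necessarily the docs.rs body):

```rust
use anyhow::{anyhow, Result};

pub(crate) async fn spawn_blocking<F, R>(f: F) -> Result<R>
where
    F: FnOnce() -> Result<R> + Send + 'static,
    R: Send + 'static,
{
    // Run the closure on tokio's dedicated blocking thread pool and
    // flatten a JoinError (panic or cancellation) into anyhow.
    tokio::task::spawn_blocking(f)
        .await
        .map_err(|err| anyhow!("failed to join blocking task: {err}"))?
}
```
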