gpui: Update dependency package names (#40143)
This moves some of the changes made in https://github.com/zed-industries/zed/pull/39543 into the `publish_gpui` script. This PR also updates that script to publish crates under `gpui_`-prefixed names instead of `zed-`-prefixed ones (where possible).

Release Notes:

- N/A
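For example, with this change the workspace keeps the plain entry `collections = { path = "crates/collections", version = "0.1.0" }`, and at publish time the script rewrites it to point at the renamed crate, producing something like `collections = { path = "crates/collections" , version = "0.2.0", package = "gpui_collections" }` (this is exactly the behavior exercised by the tests at the bottom of this diff).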
Cargo.lock (generated, 870 lines changed): diff suppressed because it is too large.
Cargo.toml (28 lines changed)
@@ -274,7 +274,7 @@ cloud_llm_client = { path = "crates/cloud_llm_client" }
cloud_zeta2_prompt = { path = "crates/cloud_zeta2_prompt" }
collab = { path = "crates/collab" }
collab_ui = { path = "crates/collab_ui" }
collections = { path = "crates/collections", package = "zed-collections", version = "0.1.0" }
collections = { path = "crates/collections", version = "0.1.0" }
command_palette = { path = "crates/command_palette" }
command_palette_hooks = { path = "crates/command_palette_hooks" }
component = { path = "crates/component" }
@@ -290,7 +290,7 @@ debug_adapter_extension = { path = "crates/debug_adapter_extension" }
debugger_tools = { path = "crates/debugger_tools" }
debugger_ui = { path = "crates/debugger_ui" }
deepseek = { path = "crates/deepseek" }
derive_refineable = { path = "crates/refineable/derive_refineable", package = "zed-derive-refineable", version = "0.1.0" }
derive_refineable = { path = "crates/refineable/derive_refineable" }
diagnostics = { path = "crates/diagnostics" }
editor = { path = "crates/editor" }
extension = { path = "crates/extension" }
@@ -309,10 +309,10 @@ git_ui = { path = "crates/git_ui" }
go_to_line = { path = "crates/go_to_line" }
google_ai = { path = "crates/google_ai" }
gpui = { path = "crates/gpui", default-features = false }
gpui_macros = { path = "crates/gpui_macros", package = "gpui-macros", version = "0.1.0" }
gpui_macros = { path = "crates/gpui_macros" }
gpui_tokio = { path = "crates/gpui_tokio" }
html_to_markdown = { path = "crates/html_to_markdown" }
http_client = { path = "crates/http_client", package = "zed-http-client", version = "0.1.0" }
http_client = { path = "crates/http_client" }
http_client_tls = { path = "crates/http_client_tls" }
icons = { path = "crates/icons" }
image_viewer = { path = "crates/image_viewer" }
@@ -341,7 +341,7 @@ lsp = { path = "crates/lsp" }
markdown = { path = "crates/markdown" }
markdown_preview = { path = "crates/markdown_preview" }
svg_preview = { path = "crates/svg_preview" }
media = { path = "crates/media", package = "zed-media", version = "0.1.0" }
media = { path = "crates/media" }
menu = { path = "crates/menu" }
migrator = { path = "crates/migrator" }
mistral = { path = "crates/mistral" }
@@ -358,7 +358,7 @@ outline = { path = "crates/outline" }
outline_panel = { path = "crates/outline_panel" }
panel = { path = "crates/panel" }
paths = { path = "crates/paths" }
perf = { path = "tooling/perf", package = "zed-perf", version = "0.1.0" }
perf = { path = "tooling/perf" }
picker = { path = "crates/picker" }
plugin = { path = "crates/plugin" }
plugin_macros = { path = "crates/plugin_macros" }
@@ -370,7 +370,7 @@ project_symbols = { path = "crates/project_symbols" }
prompt_store = { path = "crates/prompt_store" }
proto = { path = "crates/proto" }
recent_projects = { path = "crates/recent_projects" }
refineable = { path = "crates/refineable", package = "zed-refineable", version = "0.1.0" }
refineable = { path = "crates/refineable" }
release_channel = { path = "crates/release_channel" }
scheduler = { path = "crates/scheduler" }
remote = { path = "crates/remote" }
@@ -383,7 +383,7 @@ rope = { path = "crates/rope" }
rpc = { path = "crates/rpc" }
rules_library = { path = "crates/rules_library" }
search = { path = "crates/search" }
semantic_version = { path = "crates/semantic_version", package = "zed-semantic-version", version = "0.1.0" }
semantic_version = { path = "crates/semantic_version" }
session = { path = "crates/session" }
settings = { path = "crates/settings" }
settings_macros = { path = "crates/settings_macros" }
@@ -396,7 +396,7 @@ sqlez_macros = { path = "crates/sqlez_macros" }
story = { path = "crates/story" }
storybook = { path = "crates/storybook" }
streaming_diff = { path = "crates/streaming_diff" }
sum_tree = { path = "crates/sum_tree", package = "zed-sum-tree", version = "0.1.0" }
sum_tree = { path = "crates/sum_tree" }
supermaven = { path = "crates/supermaven" }
supermaven_api = { path = "crates/supermaven_api" }
codestral = { path = "crates/codestral" }
@@ -420,8 +420,8 @@ ui = { path = "crates/ui" }
ui_input = { path = "crates/ui_input" }
ui_macros = { path = "crates/ui_macros" }
ui_prompt = { path = "crates/ui_prompt" }
util = { path = "crates/util", package = "zed-util", version = "0.1.0" }
util_macros = { path = "crates/util_macros", package = "zed-util-macros", version = "0.1.0" }
util = { path = "crates/util" }
util_macros = { path = "crates/util_macros" }
vercel = { path = "crates/vercel" }
vim = { path = "crates/vim" }
vim_mode_setting = { path = "crates/vim_mode_setting" }
@@ -805,7 +805,7 @@ wasmtime = { opt-level = 3 }
activity_indicator = { codegen-units = 1 }
assets = { codegen-units = 1 }
breadcrumbs = { codegen-units = 1 }
zed-collections = { codegen-units = 1 }
collections = { codegen-units = 1 }
command_palette = { codegen-units = 1 }
command_palette_hooks = { codegen-units = 1 }
extension_cli = { codegen-units = 1 }
@@ -825,11 +825,11 @@ outline = { codegen-units = 1 }
paths = { codegen-units = 1 }
prettier = { codegen-units = 1 }
project_symbols = { codegen-units = 1 }
zed-refineable = { codegen-units = 1 }
refineable = { codegen-units = 1 }
release_channel = { codegen-units = 1 }
reqwest_client = { codegen-units = 1 }
rich_text = { codegen-units = 1 }
zed-semantic-version = { codegen-units = 1 }
semantic_version = { codegen-units = 1 }
session = { codegen-units = 1 }
snippet = { codegen-units = 1 }
snippets_ui = { codegen-units = 1 }
crates/collections/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-collections"
name = "collections"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "Standard collection type re-exports used by Zed and GPUI"
crates/gpui_macros/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "gpui-macros"
name = "gpui_macros"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "Macros used by gpui"
crates/http_client/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-http-client"
name = "http_client"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "A HTTP client library for Zed and GPUI"
crates/media/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-media"
name = "media"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "Bindings to macos media handling APIs for Zed"
crates/refineable/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-refineable"
name = "refineable"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "A macro for creating 'refinement' types that can be used to partially initialize or mutate a complex struct"
crates/refineable/derive_refineable/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-derive-refineable"
name = "derive_refineable"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "A derive macro for creating refinement types in Rust"
crates/semantic_version/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-semantic-version"
name = "semantic_version"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "A library for working with semantic versioning in gpui and Zed"
crates/sum_tree/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-sum-tree"
name = "sum_tree"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "A sum tree data structure, a concurrency-friendly B-tree"
crates/util/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-util"
name = "util"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "A collection of utility structs and functions used by Zed and GPUI"
@@ -558,7 +558,7 @@ impl PathWithPosition {
/// # Examples
///
/// ```
/// # use zed_util::paths::PathWithPosition;
/// # use util::paths::PathWithPosition;
/// # use std::path::PathBuf;
/// assert_eq!(PathWithPosition::parse_str("test_file"), PathWithPosition {
///     path: PathBuf::from("test_file"),
@@ -589,7 +589,7 @@ impl PathWithPosition {
///
/// # Expected parsing results when encountering ill-formatted inputs.
/// ```
/// # use zed_util::paths::PathWithPosition;
/// # use util::paths::PathWithPosition;
/// # use std::path::PathBuf;
/// assert_eq!(PathWithPosition::parse_str("test_file.rs:a"), PathWithPosition {
///     path: PathBuf::from("test_file.rs:a"),
@@ -923,7 +923,7 @@ impl PartialOrd for NumericPrefixWithSuffix<'_> {
/// # Examples
///
/// ```
/// use zed_util::capitalize;
/// use util::capitalize;
///
/// assert_eq!(capitalize("hello"), "Hello");
/// assert_eq!(capitalize("WORLD"), "WORLD");
crates/util_macros/Cargo.toml
@@ -1,8 +1,8 @@
[package]
name = "zed-util-macros"
name = "util_macros"
version = "0.1.0"
edition.workspace = true
publish = true
publish = false
license = "Apache-2.0"
description = "Utility macros for Zed"
@@ -12,7 +12,7 @@ use syn::{ItemFn, LitStr, parse_macro_input, parse_quote};
///
/// # Example
/// ```rust
/// use zed_util_macros::path;
/// use util_macros::path;
///
/// let path = path!("/Users/user/file.txt");
/// #[cfg(target_os = "windows")]
@@ -43,7 +43,7 @@ pub fn path(input: TokenStream) -> TokenStream {
///
/// # Example
/// ```rust
/// use zed_util_macros::uri;
/// use util_macros::uri;
///
/// let uri = uri!("file:///path/to/file");
/// #[cfg(target_os = "windows")]
@@ -69,7 +69,7 @@ pub fn uri(input: TokenStream) -> TokenStream {
///
/// # Example
/// ```rust
/// use zed_util_macros::line_endings;
/// use util_macros::line_endings;
///
/// let text = line_endings!("Hello\nWorld");
/// #[cfg(target_os = "windows")]
@@ -156,7 +156,7 @@ impl PerfArgs {
///
/// # Examples
/// ```rust
/// use zed_util_macros::perf;
/// use util_macros::perf;
///
/// #[perf]
/// fn generic_test() {
@@ -172,7 +172,7 @@ impl PerfArgs {
/// This also works with `#[gpui::test]`s, though in most cases it shouldn't
/// be used with automatic iterations.
/// ```rust,ignore
/// use zed_util_macros::perf;
/// use util_macros::perf;
///
/// #[perf(iterations = 1, critical)]
/// #[gpui::test]
@@ -1,28 +0,0 @@
#!/usr/bin/env bash


# Ensure we're in a clean state on an up-to-date `main` branch.
if [[ -n $(git status --short --untracked-files=no) ]]; then
    echo "can't bump versions with uncommitted changes"
    exit 1
fi
if [[ $(git rev-parse --abbrev-ref HEAD) != "main" ]]; then
    echo "this command must be run on main"
    exit 1
fi
git pull -q --ff-only origin main


# Parse the current version
version=$(script/get-crate-version gpui)
major=$(echo $version | cut -d. -f1)
minor=$(echo $version | cut -d. -f2)
next_minor=$(expr $minor + 1)

next_minor_branch_name="bump-gpui-to-v${major}.${next_minor}.0"

git checkout -b ${next_minor_branch_name}

script/lib/bump-version.sh gpui gpui-v "" minor true

git checkout -q main
script/bump-gpui-version (new executable file, 45 lines)
@@ -0,0 +1,45 @@
#!/usr/bin/env bash

# Parse arguments
bump_type=${1:-minor}

if [[ "$bump_type" != "minor" && "$bump_type" != "patch" ]]; then
    echo "Usage: $0 [minor|patch]"
    echo "  minor (default): bumps the minor version (e.g., 0.1.0 -> 0.2.0)"
    echo "  patch: bumps the patch version (e.g., 0.1.0 -> 0.1.1)"
    exit 1
fi

# Ensure we're in a clean state on an up-to-date `main` branch.
if [[ -n $(git status --short --untracked-files=no) ]]; then
    echo "can't bump versions with uncommitted changes"
    exit 1
fi
if [[ $(git rev-parse --abbrev-ref HEAD) != "main" ]]; then
    echo "this command must be run on main"
    exit 1
fi
git pull -q --ff-only origin main


# Parse the current version
version=$(script/get-crate-version gpui)
major=$(echo $version | cut -d. -f1)
minor=$(echo $version | cut -d. -f2)
patch=$(echo $version | cut -d. -f3)

if [[ "$bump_type" == "minor" ]]; then
    next_minor=$(expr $minor + 1)
    next_version="${major}.${next_minor}.0"
else
    next_patch=$(expr $patch + 1)
    next_version="${major}.${minor}.${next_patch}"
fi

branch_name="bump-gpui-to-v${next_version}"

git checkout -b ${branch_name}

script/lib/bump-version.sh gpui gpui-v "" $bump_type true

git checkout -q main
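Usage: run `script/bump-gpui-version` for a minor bump or `script/bump-gpui-version patch` for a patch bump; per the script above, it creates a `bump-gpui-to-v<next_version>` branch off `main` and runs `script/lib/bump-version.sh` with the chosen bump type.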
tooling/perf/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "zed-perf"
name = "perf"
version = "0.1.0"
publish = true
publish = false
edition.workspace = true
license = "Apache-2.0"
description = "A tool for measuring Zed test performance, with too many Clippy lints"
tooling/perf/src/implementation.rs (new file, 447 lines)
@@ -0,0 +1,447 @@
//! The implementation of this crate is kept in a separate module
//! so that it is easy to publish this crate as part of GPUI's dependencies.

use collections::HashMap;
use serde::{Deserialize, Serialize};
use std::{num::NonZero, time::Duration};

pub mod consts {
    //! Preset identifiers and constants so that the profiler and proc macro agree
    //! on their communication protocol.

    /// The suffix on the actual test function.
    pub const SUF_NORMAL: &str = "__ZED_PERF_FN";
    /// The suffix on an extra function which prints metadata about a test to stdout.
    pub const SUF_MDATA: &str = "__ZED_PERF_MDATA";
    /// The env var in which we pass the iteration count to our tests.
    pub const ITER_ENV_VAR: &str = "ZED_PERF_ITER";
    /// The prefix printed on all benchmark test metadata lines, to distinguish it from
    /// possible output by the test harness itself.
    pub const MDATA_LINE_PREF: &str = "ZED_MDATA_";
    /// The version number for the data returned from the test metadata function.
    /// Increment on non-backwards-compatible changes.
    pub const MDATA_VER: u32 = 0;
    /// The default weight, if none is specified.
    pub const WEIGHT_DEFAULT: u8 = 50;
    /// How long a test must have run to be assumed to be reliable-ish.
    pub const NOISE_CUTOFF: std::time::Duration = std::time::Duration::from_millis(250);

    /// Identifier for the iteration count of a test metadata.
    pub const ITER_COUNT_LINE_NAME: &str = "iter_count";
    /// Identifier for the weight of a test metadata.
    pub const WEIGHT_LINE_NAME: &str = "weight";
    /// Identifier for importance in test metadata.
    pub const IMPORTANCE_LINE_NAME: &str = "importance";
    /// Identifier for the test metadata version.
    pub const VERSION_LINE_NAME: &str = "version";

    /// Where to save json run information.
    pub const RUNS_DIR: &str = ".perf-runs";
}

/// How relevant a benchmark is.
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub enum Importance {
    /// Regressions shouldn't be accepted without good reason.
    Critical = 4,
    /// Regressions should be paid extra attention.
    Important = 3,
    /// No extra attention should be paid to regressions, but they might still
    /// be indicative of something happening.
    #[default]
    Average = 2,
    /// Unclear if regressions are likely to be meaningful, but still worth keeping
    /// an eye on. Lowest level that's checked by default by the profiler.
    Iffy = 1,
    /// Regressions are likely to be spurious or don't affect core functionality.
    /// Only relevant if a lot of them happen, or as supplemental evidence for a
    /// higher-importance benchmark regressing. Not checked by default.
    Fluff = 0,
}

impl std::fmt::Display for Importance {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Importance::Critical => f.write_str("critical"),
            Importance::Important => f.write_str("important"),
            Importance::Average => f.write_str("average"),
            Importance::Iffy => f.write_str("iffy"),
            Importance::Fluff => f.write_str("fluff"),
        }
    }
}

/// Why or when did this test fail?
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum FailKind {
    /// Failed while triaging it to determine the iteration count.
    Triage,
    /// Failed while profiling it.
    Profile,
    /// Failed due to an incompatible version for the test.
    VersionMismatch,
    /// Could not parse metadata for a test.
    BadMetadata,
    /// Skipped due to filters applied on the perf run.
    Skipped,
}

impl std::fmt::Display for FailKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FailKind::Triage => f.write_str("errored in triage"),
            FailKind::Profile => f.write_str("errored while profiling"),
            FailKind::VersionMismatch => f.write_str("test version mismatch"),
            FailKind::BadMetadata => f.write_str("bad test metadata"),
            FailKind::Skipped => f.write_str("skipped"),
        }
    }
}

/// Information about a given perf test.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TestMdata {
    /// A version number for when the test was generated. If this is greater
    /// than the version this test handler expects, one of the following will
    /// happen in an unspecified manner:
    /// - The test is skipped silently.
    /// - The handler exits with an error message indicating the version mismatch
    ///   or inability to parse the metadata.
    ///
    /// INVARIANT: If `version` <= `MDATA_VER`, this tool *must* be able to
    /// correctly parse the output of this test.
    pub version: u32,
    /// How many iterations to pass this test if this is preset, or how many
    /// iterations a test ended up running afterwards if determined at runtime.
    pub iterations: Option<NonZero<usize>>,
    /// The importance of this particular test. See the docs on `Importance` for
    /// details.
    pub importance: Importance,
    /// The weight of this particular test within its importance category. Used
    /// when comparing across runs.
    pub weight: u8,
}

/// The actual timings of a test, as measured by Hyperfine.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Timings {
    /// Mean runtime for `self.iter_total` runs of this test.
    pub mean: Duration,
    /// Standard deviation for the above.
    pub stddev: Duration,
}

impl Timings {
    /// How many iterations does this test seem to do per second?
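    /// For example, 100 total iterations at a mean of 500 ms works out to
    /// (1000 / 500) * 100 = 200 iterations per second.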
    #[expect(
        clippy::cast_precision_loss,
        reason = "We only care about a couple sig figs anyways"
    )]
    #[must_use]
    pub fn iters_per_sec(&self, total_iters: NonZero<usize>) -> f64 {
        (1000. / self.mean.as_millis() as f64) * total_iters.get() as f64
    }
}

/// Aggregate results, meant to be used for a given importance category. Each
/// test name corresponds to its benchmark results, iteration count, and weight.
type CategoryInfo = HashMap<String, (Timings, NonZero<usize>, u8)>;

/// Aggregate output of all tests run by this handler.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Output {
    /// A list of test outputs. Format is `(test_name, mdata, timings)`.
    /// The latter being `Ok(_)` indicates the test succeeded.
    ///
    /// INVARIANT: If the test succeeded, the second field is `Some(mdata)` and
    /// `mdata.iterations` is `Some(_)`.
    tests: Vec<(String, Option<TestMdata>, Result<Timings, FailKind>)>,
}

impl Output {
    /// Instantiates an empty "output". Useful for merging.
    #[must_use]
    pub fn blank() -> Self {
        Output { tests: Vec::new() }
    }

    /// Reports a success and adds it to this run's `Output`.
    pub fn success(
        &mut self,
        name: impl AsRef<str>,
        mut mdata: TestMdata,
        iters: NonZero<usize>,
        timings: Timings,
    ) {
        mdata.iterations = Some(iters);
        self.tests
            .push((name.as_ref().to_string(), Some(mdata), Ok(timings)));
    }

    /// Reports a failure and adds it to this run's `Output`. If this test was tried
    /// with some number of iterations (i.e. this was not a version mismatch or skipped
    /// test), it should be reported also.
    ///
    /// Using the `fail!()` macro is usually more convenient.
    pub fn failure(
        &mut self,
        name: impl AsRef<str>,
        mut mdata: Option<TestMdata>,
        attempted_iters: Option<NonZero<usize>>,
        kind: FailKind,
    ) {
        if let Some(ref mut mdata) = mdata {
            mdata.iterations = attempted_iters;
        }
        self.tests
            .push((name.as_ref().to_string(), mdata, Err(kind)));
    }

    /// True if no tests executed this run.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.tests.is_empty()
    }

    /// Sorts the runs in the output in the order that we want them printed.
    pub fn sort(&mut self) {
        self.tests.sort_unstable_by(|a, b| match (a, b) {
            // Tests where we got no metadata go at the end.
            ((_, Some(_), _), (_, None, _)) => std::cmp::Ordering::Greater,
            ((_, None, _), (_, Some(_), _)) => std::cmp::Ordering::Less,
            // Then sort by importance, then weight.
            ((_, Some(a_mdata), _), (_, Some(b_mdata), _)) => {
                let c = a_mdata.importance.cmp(&b_mdata.importance);
                if matches!(c, std::cmp::Ordering::Equal) {
                    a_mdata.weight.cmp(&b_mdata.weight)
                } else {
                    c
                }
            }
            // Lastly by name.
            ((a_name, ..), (b_name, ..)) => a_name.cmp(b_name),
        });
    }

    /// Merges the output of two runs, appending a prefix to the results of the new run.
    /// To be used in conjunction with `Output::blank()`, or else only some tests will have
    /// a prefix set.
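    /// For example, merging a run from the `util` crate with `pref_other = "util"`
    /// records its tests as `crates/util::<test_name>`.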
    pub fn merge<'a>(&mut self, other: Self, pref_other: impl Into<Option<&'a str>>) {
        let pref = if let Some(pref) = pref_other.into() {
            "crates/".to_string() + pref + "::"
        } else {
            String::new()
        };
        self.tests = std::mem::take(&mut self.tests)
            .into_iter()
            .chain(
                other
                    .tests
                    .into_iter()
                    .map(|(name, md, tm)| (pref.clone() + &name, md, tm)),
            )
            .collect();
    }

    /// Evaluates the performance of `self` against `baseline`. The latter is taken
    /// as the comparison point, i.e. a positive resulting `PerfReport` means that
    /// `self` performed better.
    ///
    /// # Panics
    /// `self` and `baseline` are assumed to have the iterations field on all
    /// `TestMdata`s set to `Some(_)` if the `TestMdata` is present itself.
    #[must_use]
    pub fn compare_perf(self, baseline: Self) -> PerfReport {
        let self_categories = self.collapse();
        let mut other_categories = baseline.collapse();

        let deltas = self_categories
            .into_iter()
            .filter_map(|(cat, self_data)| {
                // Only compare categories where both runs have data.
                let mut other_data = other_categories.remove(&cat)?;
                let mut max = f64::MIN;
                let mut min = f64::MAX;

                // Running totals for averaging out tests.
                let mut r_total_numerator = 0.;
                let mut r_total_denominator = 0;
                // Yeah this is O(n^2), but realistically it'll hardly be a bottleneck.
                for (name, (s_timings, s_iters, weight)) in self_data {
                    // Only use the new weights if they conflict.
                    let Some((o_timings, o_iters, _)) = other_data.remove(&name) else {
                        continue;
                    };
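                    // `shift` below is the relative change in per-iteration time
                    // against the baseline: e.g. a baseline at 100 iters/sec vs.
                    // this run at 80 iters/sec gives (100 / 80) - 1 = 0.25, which
                    // `prettify` later renders as "↑ 25.0%".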
                    let shift =
                        (o_timings.iters_per_sec(o_iters) / s_timings.iters_per_sec(s_iters)) - 1.;
                    if shift > max {
                        max = shift;
                    }
                    if shift < min {
                        min = shift;
                    }
                    r_total_numerator += shift * f64::from(weight);
                    r_total_denominator += u32::from(weight);
                }
                // There were no runs here!
                if r_total_denominator == 0 {
                    None
                } else {
                    let mean = r_total_numerator / f64::from(r_total_denominator);
                    // TODO: also aggregate standard deviation? That's harder to keep
                    // meaningful, though, since we don't know which tests are correlated.
                    Some((cat, PerfDelta { max, mean, min }))
                }
            })
            .collect();

        PerfReport { deltas }
    }

    /// Collapses the `PerfReport` into a `HashMap` over `Importance`, with
    /// each importance category having its tests contained.
    fn collapse(self) -> HashMap<Importance, CategoryInfo> {
        let mut categories = HashMap::<Importance, HashMap<String, _>>::default();
        for entry in self.tests {
            if let Some(mdata) = entry.1
                && let Ok(timings) = entry.2
            {
                if let Some(handle) = categories.get_mut(&mdata.importance) {
                    handle.insert(entry.0, (timings, mdata.iterations.unwrap(), mdata.weight));
                } else {
                    let mut new = HashMap::default();
                    new.insert(entry.0, (timings, mdata.iterations.unwrap(), mdata.weight));
                    categories.insert(mdata.importance, new);
                }
            }
        }

        categories
    }
}

impl std::fmt::Display for Output {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Don't print the header for an empty run.
        if self.tests.is_empty() {
            return Ok(());
        }

        // We want to print important tests at the top, then alphabetical.
        let mut sorted = self.clone();
        sorted.sort();
        // Markdown header for making a nice little table :>
        writeln!(
            f,
            "| Command | Iter/sec | Mean [ms] | SD [ms] | Iterations | Importance (weight) |",
        )?;
        writeln!(f, "|:---|---:|---:|---:|---:|---:|")?;
        for (name, metadata, timings) in &sorted.tests {
            match metadata {
                Some(metadata) => match timings {
                    // Happy path.
                    Ok(timings) => {
                        // If the test succeeded, then metadata.iterations is Some(_).
                        writeln!(
                            f,
                            "| {} | {:.2} | {} | {:.2} | {} | {} ({}) |",
                            name,
                            timings.iters_per_sec(metadata.iterations.unwrap()),
                            {
                                // Very small mean runtimes will give inaccurate
                                // results. Should probably also penalise weight.
                                let mean = timings.mean.as_secs_f64() * 1000.;
                                if mean < consts::NOISE_CUTOFF.as_secs_f64() * 1000. / 8. {
                                    format!("{mean:.2} (unreliable)")
                                } else {
                                    format!("{mean:.2}")
                                }
                            },
                            timings.stddev.as_secs_f64() * 1000.,
                            metadata.iterations.unwrap(),
                            metadata.importance,
                            metadata.weight,
                        )?;
                    }
                    // We have (some) metadata, but the test errored.
                    Err(err) => writeln!(
                        f,
                        "| ({}) {} | N/A | N/A | N/A | {} | {} ({}) |",
                        err,
                        name,
                        metadata
                            .iterations
                            .map_or_else(|| "N/A".to_owned(), |i| format!("{i}")),
                        metadata.importance,
                        metadata.weight
                    )?,
                },
                // No metadata, couldn't even parse the test output.
                None => writeln!(
                    f,
                    "| ({}) {} | N/A | N/A | N/A | N/A | N/A |",
                    timings.as_ref().unwrap_err(),
                    name
                )?,
            }
        }
        Ok(())
    }
}

/// The difference in performance between two runs within a given importance
/// category.
struct PerfDelta {
    /// The biggest improvement / least bad regression.
    max: f64,
    /// The weighted average change in test times.
    mean: f64,
    /// The worst regression / smallest improvement.
    min: f64,
}

/// Shim type for reporting all performance deltas across importance categories.
pub struct PerfReport {
    /// Inner (group, diff) pairing.
    deltas: HashMap<Importance, PerfDelta>,
}

impl std::fmt::Display for PerfReport {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.deltas.is_empty() {
            return write!(f, "(no matching tests)");
        }
        let sorted = self.deltas.iter().collect::<Vec<_>>();
        writeln!(f, "| Category | Max | Mean | Min |")?;
        // We don't want to print too many newlines at the end, so handle newlines
        // a little jankily like this.
        write!(f, "|:---|---:|---:|---:|")?;
        for (cat, delta) in sorted.into_iter().rev() {
            const SIGN_POS: &str = "↑";
            const SIGN_NEG: &str = "↓";
            const SIGN_NEUTRAL: &str = "±";

            let prettify = |time: f64| {
                let sign = if time > 0.05 {
                    SIGN_POS
                } else if time < 0.05 && time > -0.05 {
                    SIGN_NEUTRAL
                } else {
                    SIGN_NEG
                };
                format!("{} {:.1}%", sign, time.abs() * 100.)
            };

            // Pretty-print these instead of just using the float display impl.
            write!(
                f,
                "\n| {cat} | {} | {} | {} |",
                prettify(delta.max),
                prettify(delta.mean),
                prettify(delta.min)
            )?;
        }
        Ok(())
    }
}
@@ -3,447 +3,5 @@
//!
//! For usage documentation, see the docs on this crate's binary.

[The lines removed here are a verbatim copy of the body of tooling/perf/src/implementation.rs, shown in full above.]
mod implementation;
pub use implementation::*;

@@ -46,7 +46,9 @@
//! This should probably not be called manually unless you're working on the profiler
//! itself; use the `cargo perf-test` alias (after building this crate) instead.

use zed_perf::{FailKind, Importance, Output, TestMdata, Timings, consts};
mod implementation;

use implementation::{FailKind, Importance, Output, TestMdata, Timings, consts};

use std::{
    fs::OpenOptions,
@@ -7,13 +7,13 @@ use clap::Parser;

#[derive(Parser)]
pub struct PublishGpuiArgs {
    /// Optional pre-release identifier to append to the version (e.g., alpha, test.1). Always bumps the minor version.
    #[arg(long)]
    pre_release: Option<String>,

    /// Perform a dry-run and wait for user confirmation before each publish
    #[arg(long)]
    dry_run: bool,

    /// Skip to a specific package (by package name or crate name) and start from there
    #[arg(long)]
    skip_to: Option<String>,
}

pub fn run_publish_gpui(args: PublishGpuiArgs) -> Result<()> {
@@ -24,12 +24,16 @@ pub fn run_publish_gpui(args: PublishGpuiArgs) -> Result<()> {

    let start_time = std::time::Instant::now();
    check_workspace_root()?;
    ensure_cargo_set_version()?;
    check_git_clean()?;

    if args.skip_to.is_none() {
        check_git_clean()?;
    } else {
        println!("Skipping git clean check due to --skip-to flag");
    }

    let version = read_gpui_version()?;
    println!("Updating GPUI to version: {}", version);
    publish_dependencies(&version, args.dry_run)?;
    publish_dependencies(&version, args.dry_run, args.skip_to.as_deref())?;
    publish_gpui(&version, args.dry_run)?;
    println!("GPUI published in {}s", start_time.elapsed().as_secs_f32());
    Ok(())
@@ -52,62 +56,106 @@ fn read_gpui_version() -> Result<String> {
    Ok(version.to_string())
}

fn publish_dependencies(new_version: &str, dry_run: bool) -> Result<()> {
fn publish_dependencies(new_version: &str, dry_run: bool, skip_to: Option<&str>) -> Result<()> {
    let gpui_dependencies = vec![
        ("zed-collections", "collections"),
        ("zed-perf", "perf"),
        ("zed-util-macros", "util_macros"),
        ("zed-util", "util"),
        ("gpui-macros", "gpui_macros"),
        ("zed-http-client", "http_client"),
        ("zed-derive-refineable", "derive_refineable"),
        ("zed-refineable", "refineable"),
        ("zed-semantic-version", "semantic_version"),
        ("zed-sum-tree", "sum_tree"),
        ("zed-media", "media"),
        ("collections", "gpui_collections", "crates"),
        ("perf", "gpui_perf", "tooling"),
        ("util_macros", "gpui_util_macros", "crates"),
        ("util", "gpui_util", "crates"),
        ("gpui_macros", "gpui-macros", "crates"),
        ("http_client", "gpui_http_client", "crates"),
        (
            "derive_refineable",
            "gpui_derive_refineable",
            "crates/refineable",
        ),
        ("refineable", "gpui_refineable", "crates"),
        ("semantic_version", "gpui_semantic_version", "crates"),
        ("sum_tree", "gpui_sum_tree", "crates"),
        ("media", "gpui_media", "crates"),
    ];
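    // e.g. `--skip-to util` (or `--skip-to gpui_util`) resumes publishing at
    // the util crate, skipping every entry before it in the list above.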

    for (crate_name, package_name) in gpui_dependencies {
    let mut should_skip = skip_to.is_some();
    let skip_target = skip_to.unwrap_or("");

    for (package_name, crate_name, package_dir) in gpui_dependencies {
        if should_skip {
            if package_name == skip_target || crate_name == skip_target {
                println!("Found skip target: {} ({})", crate_name, package_name);
                should_skip = false;
            } else {
                println!("Skipping: {} ({})", crate_name, package_name);
                continue;
            }
        }

        println!(
            "Publishing dependency: {} (package: {})",
            crate_name, package_name
        );

        update_crate_version(crate_name, new_version)?;
        update_workspace_dependency_version(package_name, new_version)?;
        update_crate_cargo_toml(package_name, crate_name, package_dir, new_version)?;
        update_workspace_dependency_version(package_name, crate_name, new_version)?;
        publish_crate(crate_name, dry_run)?;
    }

    // println!("Waiting 60s for the rate limit...");
    // thread::sleep(Duration::from_secs(60));
    if should_skip {
        bail!(
            "Could not find package or crate named '{}' to skip to",
            skip_target
        );
    }

    Ok(())
}

fn publish_gpui(new_version: &str, dry_run: bool) -> Result<()> {
    update_crate_version("gpui", new_version)?;
    update_crate_cargo_toml("gpui", "gpui", "crates", new_version)?;

    publish_crate("gpui", dry_run)?;

    Ok(())
}

fn update_crate_version(package_name: &str, new_version: &str) -> Result<()> {
    let output = run_command(
        Command::new("cargo")
            .arg("set-version")
            .arg("--package")
            .arg(package_name)
            .arg(new_version),
    )?;
fn update_crate_cargo_toml(
    package_name: &str,
    crate_name: &str,
    package_dir: &str,
    new_version: &str,
) -> Result<()> {
    let cargo_toml_path = format!("{}/{}/Cargo.toml", package_dir, package_name);
    let contents = std::fs::read_to_string(&cargo_toml_path)
        .context(format!("Failed to read {}", cargo_toml_path))?;

    if !output.status.success() {
        bail!("Failed to set version for package {}", package_name);
    }
    let updated = update_crate_package_fields(&contents, crate_name, new_version)?;

    std::fs::write(&cargo_toml_path, updated)
        .context(format!("Failed to write {}", cargo_toml_path))?;

    Ok(())
}

fn update_crate_package_fields(
    toml_contents: &str,
    crate_name: &str,
    new_version: &str,
) -> Result<String> {
    let mut doc = toml_contents
        .parse::<toml_edit::DocumentMut>()
        .context("Failed to parse TOML")?;

    let package = doc
        .get_mut("package")
        .and_then(|p| p.as_table_like_mut())
        .context("Failed to find [package] section")?;

    package.insert("name", toml_edit::value(crate_name));
    package.insert("version", toml_edit::value(new_version));
    package.insert("publish", toml_edit::value(true));

    Ok(doc.to_string())
}

fn publish_crate(crate_name: &str, dry_run: bool) -> Result<()> {
    let publish_crate_impl = |crate_name, dry_run| {
        let cargo = std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string());
@@ -142,29 +190,34 @@ fn publish_crate(crate_name: &str, dry_run: bool) -> Result<()> {
    Ok(())
}

fn update_workspace_dependency_version(package_name: &str, new_version: &str) -> Result<()> {
fn update_workspace_dependency_version(
    package_name: &str,
    crate_name: &str,
    new_version: &str,
) -> Result<()> {
    let workspace_cargo_toml_path = "Cargo.toml";
    let contents = std::fs::read_to_string(workspace_cargo_toml_path)
        .context("Failed to read workspace Cargo.toml")?;

    let updated = update_dependency_version_in_toml(&contents, package_name, new_version)?;
    let mut doc = contents
        .parse::<toml_edit::DocumentMut>()
        .context("Failed to parse TOML")?;

    std::fs::write(workspace_cargo_toml_path, updated)
    update_dependency_version_in_doc(&mut doc, package_name, crate_name, new_version)?;
    update_profile_override_in_doc(&mut doc, package_name, crate_name)?;

    std::fs::write(workspace_cargo_toml_path, doc.to_string())
        .context("Failed to write workspace Cargo.toml")?;

    Ok(())
}

fn update_dependency_version_in_toml(
    toml_contents: &str,
fn update_dependency_version_in_doc(
    doc: &mut toml_edit::DocumentMut,
    package_name: &str,
    crate_name: &str,
    new_version: &str,
) -> Result<String> {
    let mut doc = toml_contents
        .parse::<toml_edit::DocumentMut>()
        .context("Failed to parse TOML")?;

    // Navigate to workspace.dependencies.<package_name>
) -> Result<()> {
    let dependency = doc
        .get_mut("workspace")
        .and_then(|w| w.get_mut("dependencies"))
@@ -174,21 +227,35 @@ fn update_dependency_version_in_toml(
            package_name
        ))?;

    // Update the version field if it exists
    if let Some(dep_table) = dependency.as_table_like_mut() {
        if dep_table.contains_key("version") {
            dep_table.insert("version", toml_edit::value(new_version));
        } else {
            bail!(
                "No version field found for {} in workspace dependencies",
                package_name
            );
        }
        dep_table.insert("version", toml_edit::value(new_version));
        dep_table.insert("package", toml_edit::value(crate_name));
    } else {
        bail!("{} is not a table in workspace dependencies", package_name);
    }

    Ok(doc.to_string())
    Ok(())
}

fn update_profile_override_in_doc(
    doc: &mut toml_edit::DocumentMut,
    package_name: &str,
    crate_name: &str,
) -> Result<()> {
    if let Some(profile_dev_package) = doc
        .get_mut("profile")
        .and_then(|p| p.get_mut("dev"))
        .and_then(|d| d.get_mut("package"))
        .and_then(|p| p.as_table_like_mut())
    {
        if let Some(old_entry) = profile_dev_package.get(package_name) {
            let old_entry_clone = old_entry.clone();
            profile_dev_package.remove(package_name);
            profile_dev_package.insert(crate_name, old_entry_clone);
        }
    }

    Ok(())
}

fn check_workspace_root() -> Result<()> {
@@ -215,27 +282,6 @@ fn check_workspace_root() -> Result<()> {
    Ok(())
}

fn ensure_cargo_set_version() -> Result<()> {
    let output = run_command(
        Command::new("which")
            .arg("cargo-set-version")
            .stdout(Stdio::piped()),
    )
    .context("Failed to check for cargo-set-version")?;

    if !output.status.success() {
        println!("cargo-set-version not found. Installing cargo-edit...");

        let install_output = run_command(Command::new("cargo").arg("install").arg("cargo-edit"))?;

        if !install_output.status.success() {
            bail!("Failed to install cargo-edit");
        }
    }

    Ok(())
}

fn check_git_clean() -> Result<()> {
    let output = run_command(
        Command::new("git")
@@ -281,6 +327,10 @@ fn run_command(command: &mut Command) -> Result<Output> {
        .wait_with_output()
        .context("failed to wait for child process")?;

    if !output.status.success() {
        bail!("Command failed with status {}", output.status);
    }

    Ok(output)
}

@@ -298,12 +348,17 @@ mod tests {

        [workspace.dependencies]
        # here's a comment
        collections = { path = "crates/collections", package = "zed-collections", version = "0.1.0" }
        collections = { path = "crates/collections" }

        util = { path = "crates/util", package = "zed-util", version = "0.1.0" }
        "#};

        let result = update_dependency_version_in_toml(input, "collections", "0.2.0").unwrap();
        let mut doc = input.parse::<toml_edit::DocumentMut>().unwrap();

        update_dependency_version_in_doc(&mut doc, "collections", "gpui_collections", "0.2.0")
            .unwrap();

        let result = doc.to_string();

        let output = indoc! {r#"
            [workspace]
@@ -311,11 +366,77 @@ mod tests {

            [workspace.dependencies]
            # here's a comment
            collections = { path = "crates/collections", package = "zed-collections", version = "0.2.0" }
            collections = { path = "crates/collections" , version = "0.2.0", package = "gpui_collections" }

            util = { path = "crates/util", package = "zed-util", version = "0.1.0" }
            "#};

        assert_eq!(result, output);
    }

    #[test]
    fn test_update_crate_package_fields() {
        let input = indoc! {r#"
            [package]
            name = "collections"
            version = "0.1.0"
            edition = "2021"
            publish = false
            # some comment about the license
            license = "GPL-3.0-or-later"

            [dependencies]
            serde = "1.0"
        "#};

        let result = update_crate_package_fields(input, "gpui_collections", "0.2.0").unwrap();

        let output = indoc! {r#"
            [package]
            name = "gpui_collections"
            version = "0.2.0"
            edition = "2021"
            publish = true
            # some comment about the license
            license = "GPL-3.0-or-later"

            [dependencies]
            serde = "1.0"
        "#};

        assert_eq!(result, output);
    }

    #[test]
    fn test_update_profile_override_in_toml() {
        let input = indoc! {r#"
            [profile.dev]
            split-debuginfo = "unpacked"

            [profile.dev.package]
            taffy = { opt-level = 3 }
            collections = { codegen-units = 256 }
            refineable = { codegen-units = 256 }
            util = { codegen-units = 256 }
        "#};

        let mut doc = input.parse::<toml_edit::DocumentMut>().unwrap();

        update_profile_override_in_doc(&mut doc, "collections", "gpui_collections").unwrap();

        let result = doc.to_string();

        let output = indoc! {r#"
            [profile.dev]
            split-debuginfo = "unpacked"

            [profile.dev.package]
            taffy = { opt-level = 3 }
            refineable = { codegen-units = 256 }
            util = { codegen-units = 256 }
            gpui_collections = { codegen-units = 256 }
        "#};

        assert_eq!(result, output);
    }
}