Compare commits

13 Commits
update-rul ... git-panel-

| SHA1 |
|---|
| cb4067723b |
| 1c625f8783 |
| 4adec27a3d |
| e8daab15ab |
| 6501b0c311 |
| 6c0069ca98 |
| c8930e07a3 |
| ab352f669e |
| e79188261b |
| ab62739605 |
| cfbde91833 |
| 80b32ddaad |
| 53652cdb3f |

Cargo.lock (3 changes, generated)
@@ -21170,7 +21170,7 @@ dependencies = [

 [[package]]
 name = "zed"
-version = "0.213.0"
+version = "0.214.0"
 dependencies = [
  "acp_tools",
  "activity_indicator",
@@ -21719,6 +21719,7 @@ dependencies = [
  "serde_json",
  "settings",
  "smol",
+ "strsim",
  "thiserror 2.0.17",
  "util",
  "uuid",

@@ -1550,6 +1550,8 @@
 // Default: 10_000, maximum: 100_000 (all bigger values set will be treated as 100_000), 0 disables the scrolling.
 // Existing terminals will not pick up this change until they are recreated.
 "max_scroll_history_lines": 10000,
+// The multiplier for scrolling speed in the terminal.
+"scroll_multiplier": 1.0,
 // The minimum APCA perceptual contrast between foreground and background colors.
 // APCA (Accessible Perceptual Contrast Algorithm) is more accurate than WCAG 2.x,
 // especially for dark mode. Values range from 0 to 106.

@@ -44,6 +44,25 @@ pub async fn get_buffer_content_or_outline(
             .collect::<Vec<_>>()
     })?;

+    // If no outline exists, fall back to first 1KB so the agent has some context
+    if outline_items.is_empty() {
+        let text = buffer.read_with(cx, |buffer, _| {
+            let snapshot = buffer.snapshot();
+            let len = snapshot.len().min(1024);
+            let content = snapshot.text_for_range(0..len).collect::<String>();
+            if let Some(path) = path {
+                format!("# First 1KB of {path} (file too large to show full content, and no outline available)\n\n{content}")
+            } else {
+                format!("# First 1KB of file (file too large to show full content, and no outline available)\n\n{content}")
+            }
+        })?;
+
+        return Ok(BufferContent {
+            text,
+            is_outline: false,
+        });
+    }
+
     let outline_text = render_outline(outline_items, None, 0, usize::MAX).await?;

     let text = if let Some(path) = path {
@@ -140,3 +159,62 @@ fn render_entries(

     entries_rendered
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use fs::FakeFs;
+    use gpui::TestAppContext;
+    use project::Project;
+    use settings::SettingsStore;
+
+    #[gpui::test]
+    async fn test_large_file_fallback_to_subset(cx: &mut TestAppContext) {
+        cx.update(|cx| {
+            let settings = SettingsStore::test(cx);
+            cx.set_global(settings);
+        });
+
+        let fs = FakeFs::new(cx.executor());
+        let project = Project::test(fs, [], cx).await;
+
+        let content = "A".repeat(100 * 1024); // 100KB
+        let content_len = content.len();
+        let buffer = project
+            .update(cx, |project, cx| project.create_buffer(true, cx))
+            .await
+            .expect("failed to create buffer");
+
+        buffer.update(cx, |buffer, cx| buffer.set_text(content, cx));
+
+        let result = cx
+            .spawn(|cx| async move { get_buffer_content_or_outline(buffer, None, &cx).await })
+            .await
+            .unwrap();
+
+        // Should contain some of the actual file content
+        assert!(
+            result.text.contains("AAAAAAAAAA"),
+            "Result did not contain content subset"
+        );
+
+        // Should be marked as not an outline (it's truncated content)
+        assert!(
+            !result.is_outline,
+            "Large file without outline should not be marked as outline"
+        );
+
+        // Should be reasonably sized (much smaller than original)
+        assert!(
+            result.text.len() < 50 * 1024,
+            "Result size {} should be smaller than 50KB",
+            result.text.len()
+        );
+
+        // Should be significantly smaller than the original content
+        assert!(
+            result.text.len() < content_len / 10,
+            "Result should be much smaller than original content"
+        );
+    }
+}
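The fallback changes the practical contract of `get_buffer_content_or_outline`: callers now always get some text back for a large file, even when no outline can be built. A minimal sketch of a call site, assuming the `BufferContent` shape shown above (the path value and surrounding setup are hypothetical):

```rust
// Sketch only: `buffer` and `cx` come from the surrounding Zed code; the path is hypothetical.
let content = get_buffer_content_or_outline(buffer, Some("src/big_file.txt".to_string()), &cx).await?;
if content.is_outline {
    // Outline rendered from the syntax tree.
} else {
    // Full text for small files, or the first-1KB fallback for large files without an outline.
}
```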
@@ -2671,13 +2671,14 @@ mod tests {
     }

     #[gpui::test]
-    async fn test_large_file_mention_uses_outline(cx: &mut TestAppContext) {
+    async fn test_large_file_mention_fallback(cx: &mut TestAppContext) {
         init_test(cx);

         let fs = FakeFs::new(cx.executor());

-        // Create a large file that exceeds AUTO_OUTLINE_SIZE
-        const LINE: &str = "fn example_function() { /* some code */ }\n";
+        // Using plain text without a configured language, so no outline is available
+        const LINE: &str = "This is a line of text in the file\n";
         let large_content = LINE.repeat(2 * (outline::AUTO_OUTLINE_SIZE / LINE.len()));
         assert!(large_content.len() > outline::AUTO_OUTLINE_SIZE);

@@ -2688,8 +2689,8 @@ mod tests {
     fs.insert_tree(
         "/project",
         json!({
-            "large_file.rs": large_content.clone(),
-            "small_file.rs": small_content,
+            "large_file.txt": large_content.clone(),
+            "small_file.txt": small_content,
         }),
     )
     .await;
@@ -2735,7 +2736,7 @@ mod tests {
     let large_file_abs_path = project.read_with(cx, |project, cx| {
         let worktree = project.worktrees(cx).next().unwrap();
         let worktree_root = worktree.read(cx).abs_path();
-        worktree_root.join("large_file.rs")
+        worktree_root.join("large_file.txt")
     });
     let large_file_task = message_editor.update(cx, |editor, cx| {
         editor.confirm_mention_for_file(large_file_abs_path, cx)
@@ -2744,11 +2745,20 @@ mod tests {
     let large_file_mention = large_file_task.await.unwrap();
     match large_file_mention {
         Mention::Text { content, .. } => {
-            // Should contain outline header for large files
-            assert!(content.contains("File outline for"));
             assert!(content.contains("file too large to show full content"));
-            // Should not contain the full repeated content
-            assert!(!content.contains(&LINE.repeat(100)));
+            // Should contain some of the content but not all of it
+            assert!(
+                content.contains(LINE),
+                "Should contain some of the file content"
+            );
+            assert!(
+                !content.contains(&LINE.repeat(100)),
+                "Should not contain the full file"
+            );
+            // Should be much smaller than original
+            assert!(
+                content.len() < large_content.len() / 10,
+                "Should be significantly truncated"
+            );
         }
         _ => panic!("Expected Text mention for large file"),
     }
@@ -2758,7 +2768,7 @@ mod tests {
     let small_file_abs_path = project.read_with(cx, |project, cx| {
         let worktree = project.worktrees(cx).next().unwrap();
         let worktree_root = worktree.read(cx).abs_path();
-        worktree_root.join("small_file.rs")
+        worktree_root.join("small_file.txt")
     });
     let small_file_task = message_editor.update(cx, |editor, cx| {
         editor.confirm_mention_for_file(small_file_abs_path, cx)
@@ -2767,10 +2777,8 @@ mod tests {
     let small_file_mention = small_file_task.await.unwrap();
     match small_file_mention {
         Mention::Text { content, .. } => {
-            // Should contain the actual content
+            // Should contain the full actual content
             assert_eq!(content, small_content);
-            // Should not contain outline header
-            assert!(!content.contains("File outline for"));
         }
         _ => panic!("Expected Text mention for small file"),
     }

@@ -1089,7 +1089,7 @@ mod tests {
     }

     #[gpui::test]
-    async fn test_large_file_uses_outline(cx: &mut TestAppContext) {
+    async fn test_large_file_uses_fallback(cx: &mut TestAppContext) {
         init_test_settings(cx);

         // Create a large file that exceeds AUTO_OUTLINE_SIZE
@@ -1101,16 +1101,16 @@ mod tests {

     let file_context = load_context_for("file.txt", large_content, cx).await;

+    // Should contain some of the actual file content
     assert!(
-        file_context
-            .text
-            .contains(&format!("# File outline for {}", path!("test/file.txt"))),
-        "Large files should not get an outline"
+        file_context.text.contains(LINE),
+        "Should contain some of the file content"
     );

     // Should be much smaller than original
     assert!(
-        file_context.text.len() < content_len,
-        "Outline should be smaller than original content"
+        file_context.text.len() < content_len / 10,
+        "Should be significantly smaller than original content"
     );
 }

@@ -44,7 +44,7 @@ pub struct SearchToolInput {
 }

 /// Search for relevant code by path, syntax hierarchy, and content.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Hash)]
 pub struct SearchToolQuery {
     /// 1. A glob pattern to match file paths in the codebase to search in.
     pub glob: String,

@@ -39,25 +39,6 @@ pub enum Relation {
     Contributor,
 }

-impl Model {
-    /// Returns the timestamp of when the user's account was created.
-    ///
-    /// This will be the earlier of the `created_at` and `github_user_created_at` timestamps.
-    pub fn account_created_at(&self) -> NaiveDateTime {
-        let mut account_created_at = self.created_at;
-        if let Some(github_created_at) = self.github_user_created_at {
-            account_created_at = account_created_at.min(github_created_at);
-        }
-
-        account_created_at
-    }
-
-    /// Returns the age of the user's account.
-    pub fn account_age(&self) -> chrono::Duration {
-        chrono::Utc::now().naive_utc() - self.account_created_at()
-    }
-}
-
 impl Related<super::access_token::Entity> for Entity {
     fn to() -> RelationDef {
         Relation::AccessToken.def()

@@ -370,11 +370,16 @@ impl BufferDiagnosticsEditor {
             continue;
         }

+        let languages = buffer_diagnostics_editor
+            .read_with(cx, |b, cx| b.project.read(cx).languages().clone())
+            .ok();
+
         let diagnostic_blocks = cx.update(|_window, cx| {
             DiagnosticRenderer::diagnostic_blocks_for_group(
                 group,
                 buffer_snapshot.remote_id(),
                 Some(Arc::new(buffer_diagnostics_editor.clone())),
+                languages,
                 cx,
             )
         })?;

@@ -6,7 +6,7 @@ use editor::{
     hover_popover::diagnostics_markdown_style,
 };
 use gpui::{AppContext, Entity, Focusable, WeakEntity};
-use language::{BufferId, Diagnostic, DiagnosticEntryRef};
+use language::{BufferId, Diagnostic, DiagnosticEntryRef, LanguageRegistry};
 use lsp::DiagnosticSeverity;
 use markdown::{Markdown, MarkdownElement};
 use settings::Settings;
@@ -27,6 +27,7 @@ impl DiagnosticRenderer {
         diagnostic_group: Vec<DiagnosticEntryRef<'_, Point>>,
         buffer_id: BufferId,
         diagnostics_editor: Option<Arc<dyn DiagnosticsToolbarEditor>>,
+        language_registry: Option<Arc<LanguageRegistry>>,
         cx: &mut App,
     ) -> Vec<DiagnosticBlock> {
         let Some(primary_ix) = diagnostic_group
@@ -75,11 +76,14 @@ impl DiagnosticRenderer {
             ))
         }
     }

     results.push(DiagnosticBlock {
         initial_range: primary.range.clone(),
         severity: primary.diagnostic.severity,
         diagnostics_editor: diagnostics_editor.clone(),
-        markdown: cx.new(|cx| Markdown::new(markdown.into(), None, None, cx)),
+        markdown: cx.new(|cx| {
+            Markdown::new(markdown.into(), language_registry.clone(), None, cx)
+        }),
     });
 } else {
     if entry.range.start.row.abs_diff(primary.range.start.row) >= 5 {
@@ -91,7 +95,9 @@ impl DiagnosticRenderer {
         initial_range: entry.range.clone(),
         severity: entry.diagnostic.severity,
         diagnostics_editor: diagnostics_editor.clone(),
-        markdown: cx.new(|cx| Markdown::new(markdown.into(), None, None, cx)),
+        markdown: cx.new(|cx| {
+            Markdown::new(markdown.into(), language_registry.clone(), None, cx)
+        }),
     });
 }
@@ -118,9 +124,16 @@ impl editor::DiagnosticRenderer for DiagnosticRenderer {
     buffer_id: BufferId,
     snapshot: EditorSnapshot,
     editor: WeakEntity<Editor>,
+    language_registry: Option<Arc<LanguageRegistry>>,
     cx: &mut App,
 ) -> Vec<BlockProperties<Anchor>> {
-    let blocks = Self::diagnostic_blocks_for_group(diagnostic_group, buffer_id, None, cx);
+    let blocks = Self::diagnostic_blocks_for_group(
+        diagnostic_group,
+        buffer_id,
+        None,
+        language_registry,
+        cx,
+    );

     blocks
         .into_iter()
@@ -146,9 +159,16 @@ impl editor::DiagnosticRenderer for DiagnosticRenderer {
     diagnostic_group: Vec<DiagnosticEntryRef<'_, Point>>,
     range: Range<Point>,
     buffer_id: BufferId,
+    language_registry: Option<Arc<LanguageRegistry>>,
     cx: &mut App,
 ) -> Option<Entity<Markdown>> {
-    let blocks = Self::diagnostic_blocks_for_group(diagnostic_group, buffer_id, None, cx);
+    let blocks = Self::diagnostic_blocks_for_group(
+        diagnostic_group,
+        buffer_id,
+        None,
+        language_registry,
+        cx,
+    );
     blocks
         .into_iter()
         .find_map(|block| (block.initial_range == range).then(|| block.markdown))
@@ -206,6 +226,11 @@ impl DiagnosticBlock {
         self.markdown.clone(),
         diagnostics_markdown_style(bcx.window, cx),
     )
+    .code_block_renderer(markdown::CodeBlockRenderer::Default {
+        copy_button: false,
+        copy_button_on_hover: false,
+        border: false,
+    })
    .on_url_click({
        move |link, window, cx| {
            editor
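Threading `Option<Arc<LanguageRegistry>>` through these constructors matters because `Markdown::new` uses the registry to syntax-highlight fenced code blocks; with `None`, code blocks inside diagnostics render as plain text. A hedged sketch of the difference (`text`, `language_registry`, and `cx` are assumed from the surrounding code):

```rust
// Without a registry, fenced blocks in the diagnostic markdown are not highlighted:
let plain = cx.new(|cx| Markdown::new(text.clone().into(), None, None, cx));
// With the project's registry, the ```typescript blocks emitted by the vtsls
// adapter later in this diff get syntax highlighting:
let highlighted = cx.new(|cx| Markdown::new(text.into(), language_registry.clone(), None, cx));
```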

@@ -73,7 +73,7 @@ pub fn init(cx: &mut App) {
 }

 pub(crate) struct ProjectDiagnosticsEditor {
-    project: Entity<Project>,
+    pub project: Entity<Project>,
     workspace: WeakEntity<Workspace>,
     focus_handle: FocusHandle,
     editor: Entity<Editor>,
@@ -545,11 +545,15 @@ impl ProjectDiagnosticsEditor {
     if group_severity.is_none_or(|s| s > max_severity) {
         continue;
     }
+    let languages = this
+        .read_with(cx, |t, cx| t.project.read(cx).languages().clone())
+        .ok();
     let more = cx.update(|_, cx| {
         crate::diagnostic_renderer::DiagnosticRenderer::diagnostic_blocks_for_group(
             group,
             buffer_snapshot.remote_id(),
             Some(diagnostics_toolbar_editor.clone()),
+            languages,
             cx,
         )
     })?;

@@ -506,7 +506,7 @@ impl CompletionsMenu {
     cx: &mut Context<Editor>,
 ) {
     self.scroll_handle
-        .scroll_to_item(self.selected_item, ScrollStrategy::Top);
+        .scroll_to_item(self.selected_item, ScrollStrategy::Nearest);
     if let Some(provider) = provider {
         let entries = self.entries.borrow();
         let entry = if self.selected_item < entries.len() {

@@ -117,8 +117,9 @@ use language::{
     AutoindentMode, BlockCommentConfig, BracketMatch, BracketPair, Buffer, BufferRow,
     BufferSnapshot, Capability, CharClassifier, CharKind, CharScopeContext, CodeLabel, CursorShape,
     DiagnosticEntryRef, DiffOptions, EditPredictionsMode, EditPreview, HighlightedText, IndentKind,
-    IndentSize, Language, OffsetRangeExt, OutlineItem, Point, Runnable, RunnableRange, Selection,
-    SelectionGoal, TextObject, TransactionId, TreeSitterOptions, WordsQuery,
+    IndentSize, Language, LanguageRegistry, OffsetRangeExt, OutlineItem, Point, Runnable,
+    RunnableRange, Selection, SelectionGoal, TextObject, TransactionId, TreeSitterOptions,
+    WordsQuery,
     language_settings::{
         self, LspInsertMode, RewrapBehavior, WordsCompletionMode, all_language_settings,
         language_settings,
@@ -371,6 +372,7 @@ pub trait DiagnosticRenderer {
     buffer_id: BufferId,
     snapshot: EditorSnapshot,
     editor: WeakEntity<Editor>,
+    language_registry: Option<Arc<LanguageRegistry>>,
     cx: &mut App,
 ) -> Vec<BlockProperties<Anchor>>;

@@ -379,6 +381,7 @@ pub trait DiagnosticRenderer {
     diagnostic_group: Vec<DiagnosticEntryRef<'_, Point>>,
     range: Range<Point>,
     buffer_id: BufferId,
+    language_registry: Option<Arc<LanguageRegistry>>,
     cx: &mut App,
 ) -> Option<Entity<markdown::Markdown>>;

@@ -11060,6 +11063,10 @@ impl Editor {
     window: &mut Window,
     cx: &mut Context<Self>,
 ) {
+    if self.breakpoint_store.is_none() {
+        return;
+    }
+
     for (anchor, breakpoint) in self.breakpoints_at_cursors(window, cx) {
         let breakpoint = breakpoint.unwrap_or_else(|| Breakpoint {
             message: None,
@@ -11119,6 +11126,10 @@ impl Editor {
     window: &mut Window,
     cx: &mut Context<Self>,
 ) {
+    if self.breakpoint_store.is_none() {
+        return;
+    }
+
     for (anchor, breakpoint) in self.breakpoints_at_cursors(window, cx) {
         let Some(breakpoint) = breakpoint.filter(|breakpoint| breakpoint.is_disabled()) else {
             continue;
@@ -11138,6 +11149,10 @@ impl Editor {
     window: &mut Window,
     cx: &mut Context<Self>,
 ) {
+    if self.breakpoint_store.is_none() {
+        return;
+    }
+
     for (anchor, breakpoint) in self.breakpoints_at_cursors(window, cx) {
         let Some(breakpoint) = breakpoint.filter(|breakpoint| breakpoint.is_enabled()) else {
             continue;
@@ -11157,6 +11172,10 @@ impl Editor {
     window: &mut Window,
     cx: &mut Context<Self>,
 ) {
+    if self.breakpoint_store.is_none() {
+        return;
+    }
+
     for (anchor, breakpoint) in self.breakpoints_at_cursors(window, cx) {
         if let Some(breakpoint) = breakpoint {
             self.edit_breakpoint_at_anchor(
@@ -17931,8 +17950,18 @@ impl Editor {
     .diagnostic_group(buffer_id, diagnostic.diagnostic.group_id)
     .collect::<Vec<_>>();

-let blocks =
-    renderer.render_group(diagnostic_group, buffer_id, snapshot, cx.weak_entity(), cx);
+let language_registry = self
+    .project()
+    .map(|project| project.read(cx).languages().clone());
+
+let blocks = renderer.render_group(
+    diagnostic_group,
+    buffer_id,
+    snapshot,
+    cx.weak_entity(),
+    language_registry,
+    cx,
+);

 let blocks = self.display_map.update(cx, |display_map, cx| {
     display_map.insert_blocks(blocks, cx).into_iter().collect()

@@ -341,7 +341,13 @@ fn show_hover(
     renderer
         .as_ref()
         .and_then(|renderer| {
-            renderer.render_hover(group, point_range, buffer_id, cx)
+            renderer.render_hover(
+                group,
+                point_range,
+                buffer_id,
+                language_registry.clone(),
+                cx,
+            )
         })
         .context("no rendered diagnostic")
 })??;
@@ -986,6 +992,11 @@ impl DiagnosticPopover {
     self.markdown.clone(),
     diagnostics_markdown_style(window, cx),
 )
+.code_block_renderer(markdown::CodeBlockRenderer::Default {
+    copy_button: false,
+    copy_button_on_hover: false,
+    border: false,
+})
 .on_url_click(
     move |link, window, cx| {
         if let Some(renderer) = GlobalDiagnosticRenderer::global(cx)

@@ -72,8 +72,8 @@ impl Watcher for FsWatcher {
 }
 #[cfg(target_os = "linux")]
 {
-    log::trace!("path to watch is already watched: {path:?}");
     if self.registrations.lock().contains_key(path) {
+        log::trace!("path to watch is already watched: {path:?}");
         return Ok(());
     }
 }

@@ -92,6 +92,10 @@ pub enum ScrollStrategy {
     /// May not be possible if there's not enough list items above the item scrolled to:
     /// in this case, the element will be placed at the closest possible position.
     Bottom,
+    /// If the element is not visible, attempt to place it at:
+    /// - The top of the list's viewport if the target element is above the currently visible elements.
+    /// - The bottom of the list's viewport if the target element is below the currently visible elements.
+    Nearest,
 }

 #[derive(Clone, Copy, Debug)]
@@ -391,39 +395,42 @@ impl Element for UniformList {
     scroll_offset.x = Pixels::ZERO;
 }

-if let Some(deferred_scroll) = shared_scroll_to_item {
-    let mut ix = deferred_scroll.item_index;
+if let Some(DeferredScrollToItem {
+    mut item_index,
+    mut strategy,
+    offset,
+    scroll_strict,
+}) = shared_scroll_to_item
+{
     if y_flipped {
-        ix = self.item_count.saturating_sub(ix + 1);
+        item_index = self.item_count.saturating_sub(item_index + 1);
     }
     let list_height = padded_bounds.size.height;
     let mut updated_scroll_offset = shared_scroll_offset.borrow_mut();
-    let item_top = item_height * ix;
+    let item_top = item_height * item_index;
     let item_bottom = item_top + item_height;
     let scroll_top = -updated_scroll_offset.y;
-    let offset_pixels = item_height * deferred_scroll.offset;
-    let mut scrolled_to_top = false;
+    let offset_pixels = item_height * offset;

-    if item_top < scroll_top + offset_pixels {
-        scrolled_to_top = true;
-        // todo: using the padding here is wrong - this only works well for few scenarios
-        updated_scroll_offset.y = -item_top + padding.top + offset_pixels;
-    } else if item_bottom > scroll_top + list_height {
-        scrolled_to_top = true;
-        updated_scroll_offset.y = -(item_bottom - list_height);
-    }
+    // is the selected item above/below currently visible items
+    let is_above = item_top < scroll_top + offset_pixels;
+    let is_below = item_bottom > scroll_top + list_height;

-    if deferred_scroll.scroll_strict
-        || (scrolled_to_top
-            && (item_top < scroll_top + offset_pixels
-                || item_bottom > scroll_top + list_height))
-    {
-        match deferred_scroll.strategy {
+    if scroll_strict || is_above || is_below {
+        if strategy == ScrollStrategy::Nearest {
+            if is_above {
+                strategy = ScrollStrategy::Top;
+            } else if is_below {
+                strategy = ScrollStrategy::Bottom;
+            }
+        }

+        let max_scroll_offset =
+            (content_height - list_height).max(Pixels::ZERO);
+        match strategy {
            ScrollStrategy::Top => {
                updated_scroll_offset.y = -(item_top - offset_pixels)
-                    .max(Pixels::ZERO)
-                    .min(content_height - list_height)
-                    .max(Pixels::ZERO);
+                    .clamp(Pixels::ZERO, max_scroll_offset);
            }
            ScrollStrategy::Center => {
                let item_center = item_top + item_height / 2.0;
@@ -431,18 +438,15 @@ impl Element for UniformList {
                let viewport_height = list_height - offset_pixels;
                let viewport_center = offset_pixels + viewport_height / 2.0;
                let target_scroll_top = item_center - viewport_center;

-                updated_scroll_offset.y = -target_scroll_top
-                    .max(Pixels::ZERO)
-                    .min(content_height - list_height)
-                    .max(Pixels::ZERO);
+                updated_scroll_offset.y =
+                    -target_scroll_top.clamp(Pixels::ZERO, max_scroll_offset);
            }
            ScrollStrategy::Bottom => {
-                updated_scroll_offset.y = -(item_bottom - list_height
-                    + offset_pixels)
-                    .max(Pixels::ZERO)
-                    .min(content_height - list_height)
-                    .max(Pixels::ZERO);
+                updated_scroll_offset.y = -(item_bottom - list_height)
+                    .clamp(Pixels::ZERO, max_scroll_offset);
            }
+            ScrollStrategy::Nearest => {
+                // Nearest, but the item is visible -> no scroll is required
+            }
        }
    }
@@ -695,3 +699,150 @@ impl InteractiveElement for UniformList {
     &mut self.interactivity
 }
}
+
+#[cfg(test)]
+mod test {
+    use crate::TestAppContext;
+
+    #[gpui::test]
+    fn test_scroll_strategy_nearest(cx: &mut TestAppContext) {
+        use crate::{
+            Context, FocusHandle, ScrollStrategy, UniformListScrollHandle, Window, actions, div,
+            prelude::*, px, uniform_list,
+        };
+        use std::ops::Range;
+
+        actions!(example, [SelectNext, SelectPrev]);
+
+        struct TestView {
+            index: usize,
+            length: usize,
+            scroll_handle: UniformListScrollHandle,
+            focus_handle: FocusHandle,
+            visible_range: Range<usize>,
+        }
+
+        impl TestView {
+            pub fn select_next(
+                &mut self,
+                _: &SelectNext,
+                window: &mut Window,
+                _: &mut Context<Self>,
+            ) {
+                if self.index + 1 == self.length {
+                    self.index = 0
+                } else {
+                    self.index += 1;
+                }
+                self.scroll_handle
+                    .scroll_to_item(self.index, ScrollStrategy::Nearest);
+                window.refresh();
+            }
+
+            pub fn select_previous(
+                &mut self,
+                _: &SelectPrev,
+                window: &mut Window,
+                _: &mut Context<Self>,
+            ) {
+                if self.index == 0 {
+                    self.index = self.length - 1
+                } else {
+                    self.index -= 1;
+                }
+                self.scroll_handle
+                    .scroll_to_item(self.index, ScrollStrategy::Nearest);
+                window.refresh();
+            }
+        }
+
+        impl Render for TestView {
+            fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
+                div()
+                    .id("list-example")
+                    .track_focus(&self.focus_handle)
+                    .on_action(cx.listener(Self::select_next))
+                    .on_action(cx.listener(Self::select_previous))
+                    .size_full()
+                    .child(
+                        uniform_list(
+                            "entries",
+                            self.length,
+                            cx.processor(|this, range: Range<usize>, _window, _cx| {
+                                this.visible_range = range.clone();
+                                range
+                                    .map(|ix| div().id(ix).h(px(20.0)).child(format!("Item {ix}")))
+                                    .collect()
+                            }),
+                        )
+                        .track_scroll(self.scroll_handle.clone())
+                        .h(px(200.0)),
+                    )
+            }
+        }
+
+        let (view, cx) = cx.add_window_view(|window, cx| {
+            let focus_handle = cx.focus_handle();
+            window.focus(&focus_handle);
+            TestView {
+                scroll_handle: UniformListScrollHandle::new(),
+                index: 0,
+                focus_handle,
+                length: 47,
+                visible_range: 0..0,
+            }
+        });
+
+        // 10 out of 47 items are visible
+
+        // First 9 times selecting next item does not scroll
+        for ix in 1..10 {
+            cx.dispatch_action(SelectNext);
+            view.read_with(cx, |view, _| {
+                assert_eq!(view.index, ix);
+                assert_eq!(view.visible_range, 0..10);
+            })
+        }
+
+        // Now each time the list scrolls down by 1
+        for ix in 10..47 {
+            cx.dispatch_action(SelectNext);
+            view.read_with(cx, |view, _| {
+                assert_eq!(view.index, ix);
+                assert_eq!(view.visible_range, ix - 9..ix + 1);
+            })
+        }
+
+        // After the last item we move back to the start
+        cx.dispatch_action(SelectNext);
+        view.read_with(cx, |view, _| {
+            assert_eq!(view.index, 0);
+            assert_eq!(view.visible_range, 0..10);
+        });
+
+        // Return to the last element
+        cx.dispatch_action(SelectPrev);
+        view.read_with(cx, |view, _| {
+            assert_eq!(view.index, 46);
+            assert_eq!(view.visible_range, 37..47);
+        });
+
+        // First 9 times selecting previous does not scroll
+        for ix in (37..46).rev() {
+            cx.dispatch_action(SelectPrev);
+            view.read_with(cx, |view, _| {
+                assert_eq!(view.index, ix);
+                assert_eq!(view.visible_range, 37..47);
+            })
+        }
+
+        // Now each time the list scrolls up by 1
+        for ix in (0..37).rev() {
+            cx.dispatch_action(SelectPrev);
+            view.read_with(cx, |view, _| {
+                assert_eq!(view.index, ix);
+                assert_eq!(view.visible_range, ix..ix + 10);
+            })
+        }
+    }
+}
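A compact sketch of the `Nearest` strategy's decision rule, using plain `f32` offsets instead of `Pixels` (names are illustrative, not from the diff):

```rust
// Returns the new scroll top, or None when the item is already visible.
fn nearest_scroll_top(item_top: f32, item_height: f32, scroll_top: f32, viewport: f32) -> Option<f32> {
    let item_bottom = item_top + item_height;
    if item_top < scroll_top {
        Some(item_top) // above the viewport: behave like ScrollStrategy::Top
    } else if item_bottom > scroll_top + viewport {
        Some(item_bottom - viewport) // below the viewport: behave like ScrollStrategy::Bottom
    } else {
        None // visible: no scroll required
    }
}
```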

@@ -327,10 +327,8 @@ impl BackgroundExecutor {
         "parked with nothing left to run{waiting_message}{backtrace_message}",
     )
 }
-dispatcher.set_unparker(unparker.clone());
-parker.park_timeout(
-    test_should_end_by.saturating_duration_since(Instant::now()),
-);
+dispatcher.push_unparker(unparker.clone());
+parker.park_timeout(Duration::from_millis(1));
 if Instant::now() > test_should_end_by {
     panic!("test timed out after {duration:?} with allow_parking")
 }

@@ -38,7 +38,7 @@ struct TestDispatcherState {
     waiting_backtrace: Option<Backtrace>,
     deprioritized_task_labels: HashSet<TaskLabel>,
     block_on_ticks: RangeInclusive<usize>,
-    last_parked: Option<Unparker>,
+    unparkers: Vec<Unparker>,
 }

 impl TestDispatcher {
@@ -58,7 +58,7 @@ impl TestDispatcher {
     waiting_backtrace: None,
     deprioritized_task_labels: Default::default(),
     block_on_ticks: 0..=1000,
-    last_parked: None,
+    unparkers: Default::default(),
 };

 TestDispatcher {
@@ -245,20 +245,14 @@ impl TestDispatcher {
         let block_on_ticks = lock.block_on_ticks.clone();
         lock.random.random_range(block_on_ticks)
     }
-    pub fn unpark_last(&self) {
-        self.state
-            .lock()
-            .last_parked
-            .take()
-            .as_ref()
-            .map(Unparker::unpark);
-    }

-    pub fn set_unparker(&self, unparker: Unparker) {
-        let last = { self.state.lock().last_parked.replace(unparker) };
-        if let Some(last) = last {
-            last.unpark();
-        }
+    pub fn unpark_all(&self) {
+        self.state.lock().unparkers.retain(|parker| parker.unpark());
+    }
+
+    pub fn push_unparker(&self, unparker: Unparker) {
+        let mut state = self.state.lock();
+        state.unparkers.push(unparker);
     }
 }

@@ -299,7 +293,7 @@ impl PlatformDispatcher for TestDispatcher {
         state.background.push(runnable);
     }
 }
-self.unpark_last();
+self.unpark_all();
}

 fn dispatch_on_main_thread(&self, runnable: RunnableVariant) {
@@ -309,7 +303,7 @@ impl PlatformDispatcher for TestDispatcher {
     .entry(self.id)
     .or_default()
     .push_back(runnable);
-    self.unpark_last();
+    self.unpark_all();
 }

 fn dispatch_after(&self, duration: std::time::Duration, runnable: RunnableVariant) {
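The bookkeeping change here: with a single `last_parked` slot, registering a second parked thread evicted (and woke) the first, so a later dispatch could miss a sleeper; a `Vec` lets dispatch wake every parked thread. A minimal sketch, assuming the `parking` crate (whose `Unparker::unpark` returns whether a token was delivered):

```rust
use parking::{Parker, Unparker};

struct Sleepers {
    unparkers: Vec<Unparker>,
}

impl Sleepers {
    fn push(&mut self, unparker: Unparker) {
        self.unparkers.push(unparker);
    }

    fn unpark_all(&mut self) {
        // `unpark` returns true if it delivered a token; `retain` keeps those,
        // mirroring the diff's `retain(|parker| parker.unpark())`.
        self.unparkers.retain(|u| u.unpark());
    }
}

fn main() {
    let parker = Parker::new();
    let mut sleepers = Sleepers { unparkers: vec![parker.unparker()] };
    sleepers.unpark_all();
    parker.park(); // the delivered token is consumed immediately; no blocking
}
```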

@@ -9,6 +9,36 @@
 (type_identifier) @type
 (predefined_type) @type.builtin

+;; Highlights object literals by hijacking the statement_block pattern, but only if
+;; the statement block follows an object literal pattern
+((statement_block
+  (labeled_statement
+    ;; highlight the label like a property name
+    label: (statement_identifier) @property.name
+    body: [
+      ;; match a terminating expression statement
+      (expression_statement
+        ;; single identifier - treat as a type name
+        [(identifier) @type.name
+         ;; object - treat as a property - type pair
+         (object
+           (pair
+             key: (_) @property.name
+             value: (_) @type.name))
+         ;; subscript_expression - treat as an array declaration
+         (subscript_expression
+           object: (_) @type.name
+           index: (_)
+         )
+         ;; templated string - treat each identifier contained as a type name
+         (template_string
+           (template_substitution
+             (identifier) @type.name))
+        ])
+      ;; match a nested statement block
+      (statement_block) @nested
+    ])))
+
 (import_specifier
   "type"
   name: (identifier) @type
@@ -79,6 +109,8 @@
 left: (identifier) @function
 right: [(function_expression) (arrow_function)])

+(arrow_function) @function
+
 ; Literals

 (this) @variable.special

@@ -6,11 +6,12 @@ use language::{LanguageName, LspAdapter, LspAdapterDelegate, LspInstaller, Toolc
 use lsp::{CodeActionKind, LanguageServerBinary, LanguageServerName};
 use node_runtime::{NodeRuntime, VersionStrategy};
 use project::{Fs, lsp_store::language_server_settings};
+use regex::Regex;
 use serde_json::Value;
 use std::{
     ffi::OsString,
     path::{Path, PathBuf},
-    sync::Arc,
+    sync::{Arc, LazyLock},
 };
 use util::{ResultExt, maybe, merge_json_value_into};

@@ -56,6 +57,20 @@ impl VtslsLspAdapter {
         None
     }
 }
+
+pub fn enhance_diagnostic_message(message: &str) -> Option<String> {
+    static SINGLE_WORD_REGEX: LazyLock<Regex> =
+        LazyLock::new(|| Regex::new(r"'([^\s']*)'").expect("Failed to create REGEX"));
+
+    static MULTI_WORD_REGEX: LazyLock<Regex> =
+        LazyLock::new(|| Regex::new(r"'([^']+\s+[^']*)'").expect("Failed to create REGEX"));
+
+    let first = SINGLE_WORD_REGEX.replace_all(message, "`$1`").to_string();
+    let second = MULTI_WORD_REGEX
+        .replace_all(&first, "\n```typescript\n$1\n```\n")
+        .to_string();
+    Some(second)
+}
 }

 pub struct TypeScriptVersions {
@@ -274,6 +289,10 @@ impl LspAdapter for VtslsLspAdapter {
     Ok(default_workspace_configuration)
 }

+fn diagnostic_message_to_markdown(&self, message: &str) -> Option<String> {
+    VtslsLspAdapter::enhance_diagnostic_message(message)
+}
+
 fn language_ids(&self) -> HashMap<LanguageName, String> {
     HashMap::from_iter([
         (LanguageName::new("TypeScript"), "typescript".into()),
@@ -302,3 +321,41 @@ async fn get_cached_ts_server_binary(
     .await
     .log_err()
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::vtsls::VtslsLspAdapter;
+
+    #[test]
+    fn test_diagnostic_message_to_markdown() {
+        // Leaves simple messages unchanged
+        let message = "The expected type comes from the return type of this signature.";
+
+        let expected = "The expected type comes from the return type of this signature.";
+
+        assert_eq!(
+            VtslsLspAdapter::enhance_diagnostic_message(message).expect("Should be some"),
+            expected
+        );
+
+        // Parses both multi-word and single-word correctly
+        let message = "Property 'baz' is missing in type '{ foo: string; bar: string; }' but required in type 'User'.";
+
+        let expected = "Property `baz` is missing in type \n```typescript\n{ foo: string; bar: string; }\n```\n but required in type `User`.";
+
+        assert_eq!(
+            VtslsLspAdapter::enhance_diagnostic_message(message).expect("Should be some"),
+            expected
+        );
+
+        // Parses multi-and-single word in any order, and ignores existing newlines
+        let message = "Type '() => { foo: string; bar: string; }' is not assignable to type 'GetUserFunction'.\n Property 'baz' is missing in type '{ foo: string; bar: string; }' but required in type 'User'.";
+
+        let expected = "Type \n```typescript\n() => { foo: string; bar: string; }\n```\n is not assignable to type `GetUserFunction`.\n Property `baz` is missing in type \n```typescript\n{ foo: string; bar: string; }\n```\n but required in type `User`.";
+
+        assert_eq!(
+            VtslsLspAdapter::enhance_diagnostic_message(message).expect("Should be some"),
+            expected
+        );
+    }
+}
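To see what the two regexes do, here is a self-contained sketch using the same patterns (only the `regex` crate is assumed):

```rust
use regex::Regex;

fn main() {
    let single_word = Regex::new(r"'([^\s']*)'").unwrap();
    let multi_word = Regex::new(r"'([^']+\s+[^']*)'").unwrap();

    let message = "Property 'baz' is missing in type '{ foo: string; }'.";
    // Whitespace-free quoted spans become inline code:
    let step1 = single_word.replace_all(message, "`$1`").to_string();
    // Quoted spans containing whitespace become fenced TypeScript blocks:
    let step2 = multi_word.replace_all(&step1, "\n```typescript\n$1\n```\n");
    println!("{step2}");
}
```

With the language registry now threaded through the diagnostics renderer (earlier in this diff), those fenced blocks render with TypeScript highlighting in hover popovers and the diagnostics panel.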

@@ -709,7 +709,7 @@ impl<D: PickerDelegate> Picker<D> {
     match &mut self.element_container {
         ElementContainer::List(state) => state.scroll_to_reveal_item(ix),
         ElementContainer::UniformList(scroll_handle) => {
-            scroll_handle.scroll_to_item(ix, ScrollStrategy::Top)
+            scroll_handle.scroll_to_item(ix, ScrollStrategy::Nearest)
         }
     }
 }

@@ -334,7 +334,7 @@ pub struct LanguageSettingsContent {
     ///
     /// Default: true
     pub use_on_type_format: Option<bool>,
-    /// Which code actions to run on save after the formatter.
+    /// Which code actions to run on save before the formatter.
     /// These are not run if formatting is off.
     ///
     /// Default: {} (or {"source.organizeImports": true} for Go).

@@ -116,6 +116,10 @@ pub struct TerminalSettingsContent {
     ///
     /// Default: 10_000
     pub max_scroll_history_lines: Option<usize>,
+    /// The multiplier for scrolling with the mouse wheel.
+    ///
+    /// Default: 1.0
+    pub scroll_multiplier: Option<f32>,
     /// Toolbar related settings
     pub toolbar: Option<TerminalToolbarContent>,
     /// Scrollbar-related settings

@@ -737,6 +737,7 @@ impl VsCodeSettings {
     option_as_meta: self.read_bool("terminal.integrated.macOptionIsMeta"),
     project: self.project_terminal_settings_content(),
     scrollbar: None,
+    scroll_multiplier: None,
     toolbar: None,
 })
}

@@ -5168,6 +5168,24 @@ pub(crate) fn settings_data(cx: &App) -> Vec<SettingsPage> {
     metadata: None,
     files: USER,
 }),
+SettingsPageItem::SettingItem(SettingItem {
+    title: "Scroll Multiplier",
+    description: "The multiplier for scrolling in the terminal with the mouse wheel",
+    field: Box::new(SettingField {
+        json_path: Some("terminal.scroll_multiplier"),
+        pick: |settings_content| {
+            settings_content.terminal.as_ref()?.scroll_multiplier.as_ref()
+        },
+        write: |settings_content, value| {
+            settings_content
+                .terminal
+                .get_or_insert_default()
+                .scroll_multiplier = value;
+        },
+    }),
+    metadata: None,
+    files: USER,
+}),
 SettingsPageItem::SectionHeader("Toolbar"),
 SettingsPageItem::SettingItem(SettingItem {
     title: "Breadcrumbs",

@@ -108,13 +108,6 @@ actions!(
     ]
 );

-///Scrolling is unbearably sluggish by default. Alacritty supports a configurable
-///Scroll multiplier that is set to 3 by default. This will be removed when I
-///Implement scroll bars.
-#[cfg(target_os = "macos")]
-const SCROLL_MULTIPLIER: f32 = 4.;
-#[cfg(not(target_os = "macos"))]
-const SCROLL_MULTIPLIER: f32 = 1.;
 const DEBUG_TERMINAL_WIDTH: Pixels = px(500.);
 const DEBUG_TERMINAL_HEIGHT: Pixels = px(30.);
 const DEBUG_CELL_WIDTH: Pixels = px(5.);
@@ -1890,10 +1883,11 @@ impl Terminal {
 }

 ///Scroll the terminal
-pub fn scroll_wheel(&mut self, e: &ScrollWheelEvent) {
+pub fn scroll_wheel(&mut self, e: &ScrollWheelEvent, scroll_multiplier: f32) {
     let mouse_mode = self.mouse_mode(e.shift);
+    let scroll_multiplier = if mouse_mode { 1. } else { scroll_multiplier };

-    if let Some(scroll_lines) = self.determine_scroll_lines(e, mouse_mode) {
+    if let Some(scroll_lines) = self.determine_scroll_lines(e, scroll_multiplier) {
         if mouse_mode {
             let point = grid_point(
                 e.position - self.last_content.terminal_bounds.bounds.origin,
@@ -1926,8 +1920,11 @@ impl Terminal {
     self.word_from_position(window.mouse_position());
 }

-fn determine_scroll_lines(&mut self, e: &ScrollWheelEvent, mouse_mode: bool) -> Option<i32> {
-    let scroll_multiplier = if mouse_mode { 1. } else { SCROLL_MULTIPLIER };
+fn determine_scroll_lines(
+    &mut self,
+    e: &ScrollWheelEvent,
+    scroll_multiplier: f32,
+) -> Option<i32> {
     let line_height = self.last_content.terminal_bounds.line_height;
     match e.touch_phase {
         /* Reset scroll state on started */

@@ -7,6 +7,7 @@ use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};

 pub use settings::AlternateScroll;

 use settings::{
     RegisterSetting, ShowScrollbar, TerminalBlink, TerminalDockPosition, TerminalLineHeight,
     VenvSettings, WorkingDirectory, merge_from::MergeFrom,
@@ -42,6 +43,7 @@ pub struct TerminalSettings {
     pub default_height: Pixels,
     pub detect_venv: VenvSettings,
     pub max_scroll_history_lines: Option<usize>,
+    pub scroll_multiplier: f32,
     pub toolbar: Toolbar,
     pub scrollbar: ScrollbarSettings,
     pub minimum_contrast: f32,
@@ -105,6 +107,7 @@ impl settings::Settings for TerminalSettings {
     default_width: px(user_content.default_width.unwrap()),
     default_height: px(user_content.default_height.unwrap()),
     detect_venv: project_content.detect_venv.unwrap(),
+    scroll_multiplier: user_content.scroll_multiplier.unwrap(),
     max_scroll_history_lines: user_content.max_scroll_history_lines,
     toolbar: Toolbar {
         breadcrumbs: user_content.toolbar.unwrap().breadcrumbs.unwrap(),

@@ -519,7 +519,12 @@ impl TerminalView {
         return;
     }
 }
-self.terminal.update(cx, |term, _| term.scroll_wheel(event));
+self.terminal.update(cx, |term, cx| {
+    term.scroll_wheel(
+        event,
+        TerminalSettings::get_global(cx).scroll_multiplier.max(0.01),
+    )
+});
}

 fn scroll_line_up(&mut self, _: &ScrollLineUp, _: &mut Window, cx: &mut Context<Self>) {
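The call site clamps the configured multiplier to at least `0.01`, so a zero or negative user setting cannot disable or invert scrolling, while mouse mode still forces a multiplier of 1 (applications in mouse mode expect unscaled wheel events). A tiny sketch of the combined rule (the function name is illustrative):

```rust
fn effective_multiplier(configured: f32, mouse_mode: bool) -> f32 {
    if mouse_mode { 1.0 } else { configured.max(0.01) }
}
```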

@@ -2,7 +2,7 @@
 description = "The fast, collaborative code editor."
 edition.workspace = true
 name = "zed"
-version = "0.213.0"
+version = "0.214.0"
 publish.workspace = true
 license = "GPL-3.0-or-later"
 authors = ["Zed Team <hi@zed.dev>"]

@@ -12,7 +12,7 @@ workspace = true
 path = "src/zeta2.rs"

 [features]
-llm-response-cache = []
+eval-support = []

 [dependencies]
 anyhow.workspace = true
@@ -37,11 +37,13 @@ release_channel.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 smol.workspace = true
+strsim.workspace = true
 thiserror.workspace = true
 util.workspace = true
 uuid.workspace = true
 workspace.workspace = true
 worktree.workspace = true
+pretty_assertions.workspace = true

 [dev-dependencies]
 clock = { workspace = true, features = ["test-support"] }
@@ -51,7 +53,6 @@ lsp.workspace = true
 indoc.workspace = true
 language = { workspace = true, features = ["test-support"] }
 language_model = { workspace = true, features = ["test-support"] }
-pretty_assertions.workspace = true
 project = { workspace = true, features = ["test-support"] }
 settings = { workspace = true, features = ["test-support"] }
 zlog.workspace = true

@@ -1,5 +1,3 @@
-use std::ops::Range;
-
 use anyhow::Result;
 use cloud_zeta2_prompt::retrieval_prompt::SearchToolQuery;
 use collections::HashMap;
@@ -14,17 +12,76 @@ use project::{
     search::{SearchQuery, SearchResult},
 };
 use smol::channel;
+use std::ops::Range;
 use util::{
     ResultExt as _,
     paths::{PathMatcher, PathStyle},
 };
 use workspace::item::Settings as _;

+#[cfg(feature = "eval-support")]
+type CachedSearchResults = std::collections::BTreeMap<std::path::PathBuf, Vec<Range<usize>>>;
+
 pub async fn run_retrieval_searches(
-    project: Entity<Project>,
     queries: Vec<SearchToolQuery>,
+    project: Entity<Project>,
+    #[cfg(feature = "eval-support")] eval_cache: Option<std::sync::Arc<dyn crate::EvalCache>>,
     cx: &mut AsyncApp,
 ) -> Result<HashMap<Entity<Buffer>, Vec<Range<Anchor>>>> {
+    #[cfg(feature = "eval-support")]
+    let cache = if let Some(eval_cache) = eval_cache {
+        use crate::EvalCacheEntryKind;
+        use anyhow::Context;
+        use collections::FxHasher;
+        use std::hash::{Hash, Hasher};
+
+        let mut hasher = FxHasher::default();
+        project.read_with(cx, |project, cx| {
+            let mut worktrees = project.worktrees(cx);
+            let Some(worktree) = worktrees.next() else {
+                panic!("Expected a single worktree in eval project. Found none.");
+            };
+            assert!(
+                worktrees.next().is_none(),
+                "Expected a single worktree in eval project. Found more than one."
+            );
+            worktree.read(cx).abs_path().hash(&mut hasher);
+        })?;
+
+        queries.hash(&mut hasher);
+        let key = (EvalCacheEntryKind::Search, hasher.finish());
+
+        if let Some(cached_results) = eval_cache.read(key) {
+            let file_results = serde_json::from_str::<CachedSearchResults>(&cached_results)
+                .context("Failed to deserialize cached search results")?;
+            let mut results = HashMap::default();
+
+            for (path, ranges) in file_results {
+                let buffer = project
+                    .update(cx, |project, cx| {
+                        let project_path = project.find_project_path(path, cx).unwrap();
+                        project.open_buffer(project_path, cx)
+                    })?
+                    .await?;
+                let snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot())?;
+                let mut ranges = ranges
+                    .into_iter()
+                    .map(|range| {
+                        snapshot.anchor_before(range.start)..snapshot.anchor_after(range.end)
+                    })
+                    .collect();
+                merge_anchor_ranges(&mut ranges, &snapshot);
+                results.insert(buffer, ranges);
+            }
+
+            return Ok(results);
+        }
+
+        Some((eval_cache, serde_json::to_string_pretty(&queries)?, key))
+    } else {
+        None
+    };
+
     let (exclude_matcher, path_style) = project.update(cx, |project, cx| {
         let global_settings = WorktreeSettings::get_global(cx);
         let exclude_patterns = global_settings
@@ -58,6 +115,8 @@ pub async fn run_retrieval_searches(
     }
     drop(results_tx);

+    #[cfg(feature = "eval-support")]
+    let cache = cache.clone();
     cx.background_spawn(async move {
         let mut results: HashMap<Entity<Buffer>, Vec<Range<Anchor>>> = HashMap::default();
         let mut snapshots = HashMap::default();
@@ -79,27 +138,32 @@ pub async fn run_retrieval_searches(
         }
     }

+    #[cfg(feature = "eval-support")]
+    if let Some((cache, queries, key)) = cache {
+        let cached_results: CachedSearchResults = results
+            .iter()
+            .filter_map(|(buffer, ranges)| {
+                let snapshot = snapshots.get(&buffer.entity_id())?;
+                let path = snapshot.file().map(|f| f.path());
+                let mut ranges = ranges
+                    .iter()
+                    .map(|range| range.to_offset(&snapshot))
+                    .collect::<Vec<_>>();
+                ranges.sort_unstable_by_key(|range| (range.start, range.end));
+
+                Some((path?.as_std_path().to_path_buf(), ranges))
+            })
+            .collect();
+        cache.write(
+            key,
+            &queries,
+            &serde_json::to_string_pretty(&cached_results)?,
+        );
+    }
+
     for (buffer, ranges) in results.iter_mut() {
         if let Some(snapshot) = snapshots.get(&buffer.entity_id()) {
-            ranges.sort_unstable_by(|a, b| {
-                a.start
-                    .cmp(&b.start, snapshot)
-                    .then(b.end.cmp(&b.end, snapshot))
-            });
-
-            let mut index = 1;
-            while index < ranges.len() {
-                if ranges[index - 1]
-                    .end
-                    .cmp(&ranges[index].start, snapshot)
-                    .is_gt()
-                {
-                    let removed = ranges.remove(index);
-                    ranges[index - 1].end = removed.end;
-                } else {
-                    index += 1;
-                }
-            }
+            merge_anchor_ranges(ranges, snapshot);
         }
     }

@@ -108,6 +172,28 @@ pub async fn run_retrieval_searches(
     .await
}

+fn merge_anchor_ranges(ranges: &mut Vec<Range<Anchor>>, snapshot: &BufferSnapshot) {
+    ranges.sort_unstable_by(|a, b| {
+        a.start
+            .cmp(&b.start, snapshot)
+            .then(b.end.cmp(&b.end, snapshot))
+    });
+
+    let mut index = 1;
+    while index < ranges.len() {
+        if ranges[index - 1]
+            .end
+            .cmp(&ranges[index].start, snapshot)
+            .is_ge()
+        {
+            let removed = ranges.remove(index);
+            ranges[index - 1].end = removed.end;
+        } else {
+            index += 1;
+        }
+    }
+}
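The extracted helper walks sorted ranges and folds any range whose start is at or before its predecessor's end (`is_ge`, where the old inline version used the stricter `is_gt`). The same logic over plain byte offsets, as a runnable sketch (this version also takes the max of the two ends, which anchor ordering otherwise guarantees):

```rust
use std::ops::Range;

fn merge_ranges(ranges: &mut Vec<Range<usize>>) {
    ranges.sort_unstable_by_key(|r| (r.start, r.end));
    let mut index = 1;
    while index < ranges.len() {
        if ranges[index - 1].end >= ranges[index].start {
            // Overlapping or adjacent: fold into the predecessor.
            let removed = ranges.remove(index);
            ranges[index - 1].end = ranges[index - 1].end.max(removed.end);
        } else {
            index += 1;
        }
    }
}

fn main() {
    let mut ranges = vec![5..9, 0..3, 2..4, 9..12];
    merge_ranges(&mut ranges);
    assert_eq!(ranges, vec![0..4, 5..12]);
}
```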

 const MAX_EXCERPT_LEN: usize = 768;
 const MAX_RESULTS_LEN: usize = MAX_EXCERPT_LEN * 5;

@@ -485,9 +571,10 @@ mod tests {
     expected_output: &str,
     cx: &mut TestAppContext,
 ) {
-    let results = run_retrieval_searches(project.clone(), vec![query], &mut cx.to_async())
-        .await
-        .unwrap();
+    let results =
+        run_retrieval_searches(vec![query], project.clone(), None, &mut cx.to_async())
+            .await
+            .unwrap();

     let mut results = results.into_iter().collect::<Vec<_>>();
     results.sort_by_key(|results| {

@@ -5,6 +5,15 @@ use std::path::Path;
 use std::sync::Arc;

+pub async fn parse_xml_edits<'a>(
+    input: &'a str,
+    get_buffer: impl Fn(&Path) -> Option<(&'a BufferSnapshot, &'a [Range<Anchor>])> + Send,
+) -> Result<(&'a BufferSnapshot, Vec<(Range<Anchor>, Arc<str>)>)> {
+    parse_xml_edits_inner(input, get_buffer)
+        .await
+        .with_context(|| format!("Failed to parse XML edits:\n{input}"))
+}
+
 async fn parse_xml_edits_inner<'a>(
     mut input: &'a str,
     get_buffer: impl Fn(&Path) -> Option<(&'a BufferSnapshot, &'a [Range<Anchor>])> + Send,
 ) -> Result<(&'a BufferSnapshot, Vec<(Range<Anchor>, Arc<str>)>)> {
@@ -56,13 +65,29 @@ fn resolve_new_text_old_text_in_buffer(
     let range = range.to_offset(buffer);
     let text = buffer.text_for_range(range.clone()).collect::<String>();
     for (match_offset, _) in text.match_indices(old_text) {
-        if offset.is_some() {
-            anyhow::bail!("old_text is not unique enough:\n{}", old_text);
+        if let Some(offset) = offset {
+            let offset_match_point = buffer.offset_to_point(offset);
+            let second_match_point = buffer.offset_to_point(range.start + match_offset);
+            anyhow::bail!(
+                "old_text is not unique enough:\n{}\nFound at {:?} and {:?}",
+                old_text,
+                offset_match_point,
+                second_match_point
+            );
         }
         offset = Some(range.start + match_offset);
     }
 }
-offset.ok_or_else(|| anyhow!("Failed to match old_text:\n{}", old_text))
+offset.ok_or_else(|| {
+    #[cfg(debug_assertions)]
+    if let Some(closest_match) = closest_old_text_match(buffer, old_text) {
+        log::info!(
+            "Closest `old_text` match: {}",
+            pretty_assertions::StrComparison::new(old_text, &closest_match)
+        )
+    }
+    anyhow!("Failed to match old_text:\n{}", old_text)
+})
}?;

 let edits_within_hunk = language::text_diff(&old_text, &new_text);
@@ -77,6 +102,68 @@ fn resolve_new_text_old_text_in_buffer(
 }))
}

+#[cfg(debug_assertions)]
+fn closest_old_text_match(buffer: &TextBufferSnapshot, old_text: &str) -> Option<String> {
+    let buffer_text = buffer.text();
+    let len = old_text.len();
+
+    if len == 0 || buffer_text.len() < len {
+        return None;
+    }
+
+    let mut min_score = usize::MAX;
+    let mut min_start = 0;
+
+    let old_text_bytes = old_text.as_bytes();
+    let old_alpha_count = old_text_bytes
+        .iter()
+        .filter(|&&b| b.is_ascii_alphanumeric())
+        .count();
+
+    let old_line_count = old_text.lines().count();
+
+    let mut cursor = 0;
+
+    while cursor + len <= buffer_text.len() {
+        let candidate = &buffer_text[cursor..cursor + len];
+        let candidate_bytes = candidate.as_bytes();
+
+        if usize::abs_diff(candidate.lines().count(), old_line_count) > 4 {
+            cursor += 1;
+            continue;
+        }
+
+        let candidate_alpha_count = candidate_bytes
+            .iter()
+            .filter(|&&b| b.is_ascii_alphanumeric())
+            .count();
+
+        // If alphanumeric character count differs by more than 30%, skip
+        if usize::abs_diff(old_alpha_count, candidate_alpha_count) * 10 > old_alpha_count * 3 {
+            cursor += 1;
+            continue;
+        }
+
+        let score = strsim::levenshtein(candidate, old_text);
+        if score < min_score {
+            min_score = score;
+            min_start = cursor;
+
+            if min_score <= len / 10 {
+                break;
+            }
+        }
+
+        cursor += 1;
+    }
+
+    if min_score != usize::MAX {
+        Some(buffer_text[min_start..min_start + len].to_string())
+    } else {
+        None
+    }
+}
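The `* 10 > old * 3` integer arithmetic above is the 30% filter from the comment; a sketch of the predicate in isolation:

```rust
// True when the candidate's alphanumeric count is within 30% of the target's,
// using integer math to avoid floats: |a - b| * 10 <= a * 3  <=>  |a - b| <= 0.3 * a.
fn within_30_percent(old_alpha: usize, candidate_alpha: usize) -> bool {
    usize::abs_diff(old_alpha, candidate_alpha) * 10 <= old_alpha * 3
}

fn main() {
    assert!(within_30_percent(100, 75)); // 25% smaller: kept, Levenshtein runs
    assert!(!within_30_percent(100, 60)); // 40% smaller: skipped cheaply
}
```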

 struct ParsedTag<'a> {
     attributes: &'a str,
     body: &'a str,
@@ -132,15 +132,8 @@ pub struct Zeta {
options: ZetaOptions,
update_required: bool,
debug_tx: Option<mpsc::UnboundedSender<ZetaDebugInfo>>,
#[cfg(feature = "llm-response-cache")]
llm_response_cache: Option<Arc<dyn LlmResponseCache>>,
}

#[cfg(feature = "llm-response-cache")]
pub trait LlmResponseCache: Send + Sync {
fn get_key(&self, url: &gpui::http_client::Url, body: &str) -> u64;
fn read_response(&self, key: u64) -> Option<String>;
fn write_response(&self, key: u64, value: &str);
#[cfg(feature = "eval-support")]
eval_cache: Option<Arc<dyn EvalCache>>,
}

#[derive(Debug, Clone, PartialEq)]

@@ -369,14 +362,14 @@ impl Zeta {
),
update_required: false,
debug_tx: None,
#[cfg(feature = "llm-response-cache")]
llm_response_cache: None,
#[cfg(feature = "eval-support")]
eval_cache: None,
}
}

#[cfg(feature = "llm-response-cache")]
pub fn with_llm_response_cache(&mut self, cache: Arc<dyn LlmResponseCache>) {
self.llm_response_cache = Some(cache);
#[cfg(feature = "eval-support")]
pub fn with_eval_cache(&mut self, cache: Arc<dyn EvalCache>) {
self.eval_cache = Some(cache);
}

pub fn debug_info(&mut self) -> mpsc::UnboundedReceiver<ZetaDebugInfo> {

@@ -736,9 +729,19 @@ impl Zeta {
// TODO data collection
let can_collect_data = cx.is_staff();

let mut included_files = project_state
let empty_context_files = HashMap::default();
let context_files = project_state
.and_then(|project_state| project_state.context.as_ref())
.unwrap_or(&HashMap::default())
.unwrap_or(&empty_context_files);

#[cfg(feature = "eval-support")]
let parsed_fut = futures::future::join_all(
context_files
.keys()
.map(|buffer| buffer.read(cx).parsing_idle()),
);

let mut included_files = context_files
.iter()
.filter_map(|(buffer_entity, ranges)| {
let buffer = buffer_entity.read(cx);

@@ -751,12 +754,19 @@ impl Zeta {
})
.collect::<Vec<_>>();

#[cfg(feature = "llm-response-cache")]
let llm_response_cache = self.llm_response_cache.clone();
included_files.sort_by(|(_, _, path_a, ranges_a), (_, _, path_b, ranges_b)| {
(path_a, ranges_a.len()).cmp(&(path_b, ranges_b.len()))
});

#[cfg(feature = "eval-support")]
let eval_cache = self.eval_cache.clone();

let request_task = cx.background_spawn({
let active_buffer = active_buffer.clone();
async move {
#[cfg(feature = "eval-support")]
parsed_fut.await;

let index_state = if let Some(index_state) = index_state {
Some(index_state.lock_owned().await)
} else {

@@ -819,17 +829,17 @@ impl Zeta {

let included_files = included_files
.iter()
.map(|(_, buffer, path, ranges)| {
.map(|(_, snapshot, path, ranges)| {
let excerpts = merge_excerpts(
&buffer,
&snapshot,
ranges.iter().map(|range| {
let point_range = range.to_point(&buffer);
let point_range = range.to_point(&snapshot);
Line(point_range.start.row)..Line(point_range.end.row)
}),
);
predict_edits_v3::IncludedFile {
path: path.clone(),
max_row: Line(buffer.max_point().row),
max_row: Line(snapshot.max_point().row),
excerpts,
}
})

@@ -948,8 +958,10 @@ impl Zeta {
client,
llm_token,
app_version,
#[cfg(feature = "llm-response-cache")]
llm_response_cache,
#[cfg(feature = "eval-support")]
eval_cache,
#[cfg(feature = "eval-support")]
EvalCacheEntryKind::Prediction,
)
.await;
let request_time = chrono::Utc::now() - before_request;

@@ -1049,9 +1061,8 @@ impl Zeta {
client: Arc<Client>,
llm_token: LlmApiToken,
app_version: SemanticVersion,
#[cfg(feature = "llm-response-cache")] llm_response_cache: Option<
Arc<dyn LlmResponseCache>,
>,
#[cfg(feature = "eval-support")] eval_cache: Option<Arc<dyn EvalCache>>,
#[cfg(feature = "eval-support")] eval_cache_kind: EvalCacheEntryKind,
) -> Result<(open_ai::Response, Option<EditPredictionUsage>)> {
let url = if let Some(predict_edits_url) = PREDICT_EDITS_URL.as_ref() {
http_client::Url::parse(&predict_edits_url)?

@@ -1061,16 +1072,23 @@ impl Zeta {
.build_zed_llm_url("/predict_edits/raw", &[])?
};

#[cfg(feature = "llm-response-cache")]
let cache_key = if let Some(cache) = llm_response_cache {
let request_json = serde_json::to_string(&request)?;
let key = cache.get_key(&url, &request_json);
#[cfg(feature = "eval-support")]
let cache_key = if let Some(cache) = eval_cache {
use collections::FxHasher;
use std::hash::{Hash, Hasher};

if let Some(response_str) = cache.read_response(key) {
let mut hasher = FxHasher::default();
url.hash(&mut hasher);
let request_str = serde_json::to_string_pretty(&request)?;
request_str.hash(&mut hasher);
let hash = hasher.finish();

let key = (eval_cache_kind, hash);
if let Some(response_str) = cache.read(key) {
return Ok((serde_json::from_str(&response_str)?, None));
}

Some((cache, key))
Some((cache, request_str, key))
} else {
None
};
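
A note on the key derivation above: `FxHasher` uses no random seed (unlike the SipHash-based `RandomState` behind `std`'s `HashMap`), so hashing the endpoint URL together with the pretty-printed request JSON produces the same `u64` across separate runs of the CLI — which is what makes the on-disk eval cache reusable between invocations.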

@@ -1088,9 +1106,9 @@ impl Zeta {
)
.await?;

#[cfg(feature = "llm-response-cache")]
if let Some((cache, key)) = cache_key {
cache.write_response(key, &serde_json::to_string(&response)?);
#[cfg(feature = "eval-support")]
if let Some((cache, request, key)) = cache_key {
cache.write(key, &request, &serde_json::to_string_pretty(&response)?);
}

Ok((response, usage))

@@ -1361,8 +1379,8 @@ impl Zeta {
reasoning_effort: None,
};

#[cfg(feature = "llm-response-cache")]
let llm_response_cache = self.llm_response_cache.clone();
#[cfg(feature = "eval-support")]
let eval_cache = self.eval_cache.clone();

cx.spawn(async move |this, cx| {
log::trace!("Sending search planning request");

@@ -1371,8 +1389,10 @@ impl Zeta {
client,
llm_token,
app_version,
#[cfg(feature = "llm-response-cache")]
llm_response_cache,
#[cfg(feature = "eval-support")]
eval_cache.clone(),
#[cfg(feature = "eval-support")]
EvalCacheEntryKind::Context,
)
.await;
let mut response = Self::handle_api_response(&this, response, cx)?;

@@ -1421,8 +1441,14 @@ impl Zeta {

log::trace!("Running retrieval search: {queries:#?}");

let related_excerpts_result =
retrieval_search::run_retrieval_searches(project.clone(), queries, cx).await;
let related_excerpts_result = retrieval_search::run_retrieval_searches(
queries,
project.clone(),
#[cfg(feature = "eval-support")]
eval_cache,
cx,
)
.await;

log::trace!("Search queries executed");

@@ -1772,6 +1798,34 @@ fn add_signature(
Some(signature_index)
}

#[cfg(feature = "eval-support")]
pub type EvalCacheKey = (EvalCacheEntryKind, u64);

#[cfg(feature = "eval-support")]
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum EvalCacheEntryKind {
Context,
Search,
Prediction,
}

#[cfg(feature = "eval-support")]
impl std::fmt::Display for EvalCacheEntryKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
EvalCacheEntryKind::Search => write!(f, "search"),
EvalCacheEntryKind::Context => write!(f, "context"),
EvalCacheEntryKind::Prediction => write!(f, "prediction"),
}
}
}

#[cfg(feature = "eval-support")]
pub trait EvalCache: Send + Sync {
fn read(&self, key: EvalCacheKey) -> Option<String>;
fn write(&self, key: EvalCacheKey, input: &str, value: &str);
}
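
As a quick illustration of the new trait, here is a minimal in-memory implementation — a sketch only, assuming the `EvalCacheKey` and `EvalCache` definitions exactly as above (the real on-disk implementation, `RunCache`, appears later in this diff). A `Vec` is used instead of a map because `EvalCacheEntryKind` derives only `PartialEq`, not `Eq + Hash`:

```rust
use std::sync::Mutex;

struct InMemoryEvalCache {
    // (key, output) pairs; the `input` is accepted but not retained here.
    entries: Mutex<Vec<(EvalCacheKey, String)>>,
}

impl EvalCache for InMemoryEvalCache {
    fn read(&self, key: EvalCacheKey) -> Option<String> {
        let entries = self.entries.lock().unwrap();
        entries
            .iter()
            .find(|(k, _)| *k == key)
            .map(|(_, output)| output.clone())
    }

    fn write(&self, key: EvalCacheKey, _input: &str, output: &str) {
        self.entries.lock().unwrap().push((key, output.to_string()));
    }
}
```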

#[cfg(test)]
mod tests {
use std::{path::Path, sync::Arc};

@@ -54,7 +54,7 @@ toml.workspace = true
util.workspace = true
watch.workspace = true
zeta.workspace = true
zeta2 = { workspace = true, features = ["llm-response-cache"] }
zeta2 = { workspace = true, features = ["eval-support"] }
zlog.workspace = true

[dev-dependencies]

@@ -14,18 +14,19 @@ use crate::{
PromptFormat,
example::{Example, NamedExample},
headless::ZetaCliAppState,
predict::{PredictionDetails, zeta2_predict},
paths::print_run_data_dir,
predict::{CacheMode, PredictionDetails, zeta2_predict},
};

#[derive(Debug, Args)]
pub struct EvaluateArguments {
example_paths: Vec<PathBuf>,
#[clap(long)]
skip_cache: bool,
#[arg(long, value_enum, default_value_t = PromptFormat::default())]
prompt_format: PromptFormat,
#[arg(long)]
use_expected_context: bool,
#[clap(long, value_enum, default_value_t = CacheMode::default())]
cache: CacheMode,
}

pub async fn run_evaluate(

@@ -39,43 +40,49 @@ pub async fn run_evaluate(
cx.spawn(async move |cx| {
run_evaluate_one(
&path,
args.skip_cache,
args.prompt_format,
args.use_expected_context,
args.cache,
app_state.clone(),
cx,
)
.await
})
});
let all_results = futures::future::try_join_all(all_tasks).await.unwrap();
let all_results = futures::future::try_join_all(all_tasks).await;

let aggregated_result = EvaluationResult {
context: Scores::aggregate(all_results.iter().map(|r| &r.context)),
edit_prediction: Scores::aggregate(all_results.iter().map(|r| &r.edit_prediction)),
};
if let Ok(all_results) = &all_results {
let aggregated_result = EvaluationResult {
context: Scores::aggregate(all_results.iter().map(|r| &r.context)),
edit_prediction: Scores::aggregate(all_results.iter().map(|r| &r.edit_prediction)),
};

if example_len > 1 {
println!("\n{}", "-".repeat(80));
println!("# TOTAL SCORES:");
println!("{}", aggregated_result.to_markdown());
if example_len > 1 {
println!("\n{}", "-".repeat(80));
println!("\n## TOTAL SCORES");
println!("{}", aggregated_result.to_markdown());
}
}

print_run_data_dir();

all_results.unwrap();
}
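
A note on the control-flow change above: keeping `all_results` as a `Result` instead of unwrapping immediately means the aggregate scores print only when every example succeeded, `print_run_data_dir()` runs either way, and the deferred `all_results.unwrap()` still fails the process afterwards — so the paths to the run artifacts are printed even for a failed run.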

pub async fn run_evaluate_one(
example_path: &Path,
skip_cache: bool,
prompt_format: PromptFormat,
use_expected_context: bool,
cache_mode: CacheMode,
app_state: Arc<ZetaCliAppState>,
cx: &mut AsyncApp,
) -> Result<EvaluationResult> {
let example = NamedExample::load(&example_path).unwrap();
let predictions = zeta2_predict(
example.clone(),
skip_cache,
prompt_format,
use_expected_context,
cache_mode,
&app_state,
cx,
)

@@ -315,9 +315,6 @@ impl NamedExample {
let (repo_owner, repo_name) = self.repo_name()?;
let file_name = self.file_name();

fs::create_dir_all(&*REPOS_DIR)?;
fs::create_dir_all(&*WORKTREES_DIR)?;

let repo_dir = REPOS_DIR.join(repo_owner.as_ref()).join(repo_name.as_ref());
let repo_lock = lock_repo(&repo_dir).await;

@@ -332,7 +329,14 @@ impl NamedExample {
}

// Resolve the example to a revision, fetching it if needed.
let revision = run_git(&repo_dir, &["rev-parse", &self.example.revision]).await;
let revision = run_git(
&repo_dir,
&[
"rev-parse",
&format!("{}^{{commit}}", self.example.revision),
],
)
.await;
let revision = if let Ok(revision) = revision {
revision
} else {
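
Why `^{commit}`: `git rev-parse <rev>^{commit}` (written `^{{commit}}` above because `{` must be doubled inside `format!`) peels the revision to a commit object, so a name that points at an annotated tag resolves to the tagged commit rather than the tag object's own SHA.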

@@ -349,7 +353,7 @@ impl NamedExample {
};

// Create the worktree for this example if needed.
let worktree_path = WORKTREES_DIR.join(&file_name);
let worktree_path = WORKTREES_DIR.join(&file_name).join(repo_name.as_ref());
if worktree_path.is_dir() {
run_git(&worktree_path, &["clean", "--force", "-d"]).await?;
run_git(&worktree_path, &["reset", "--hard", "HEAD"]).await?;

@@ -394,7 +398,7 @@ impl NamedExample {
Ok(worktree_path)
}

fn file_name(&self) -> String {
pub fn file_name(&self) -> String {
self.name
.chars()
.map(|c| {

@@ -477,7 +481,7 @@ impl NamedExample {
let mut matches = text.match_indices(&cursor_excerpt);
let Some((excerpt_offset, _)) = matches.next() else {
anyhow::bail!(
"Cursor excerpt did not exist in buffer.\nExcerpt:\n\n{cursor_excerpt}\nBuffer text:\n{text}\n"
"\nExcerpt:\n\n{cursor_excerpt}\nBuffer text:\n{text}\n.Cursor excerpt did not exist in buffer."
);
};
assert!(matches.next().is_none());

@@ -54,6 +54,7 @@ enum Command {
#[arg(long, value_enum, default_value_t = ExampleFormat::Md)]
output_format: ExampleFormat,
},
Clean,
}

#[derive(Subcommand, Debug)]

@@ -470,6 +471,7 @@ fn main() {
let example = NamedExample::load(path).unwrap();
example.write(output_format, io::stdout()).unwrap();
}
Command::Clean => std::fs::remove_dir_all(&*crate::paths::TARGET_ZETA_DIR).unwrap(),
};

let _ = cx.update(|cx| cx.quit());

@@ -1,16 +1,40 @@
use std::{env, path::PathBuf, sync::LazyLock};

static TARGET_DIR: LazyLock<PathBuf> = LazyLock::new(|| env::current_dir().unwrap().join("target"));
pub static CACHE_DIR: LazyLock<PathBuf> =
LazyLock::new(|| TARGET_DIR.join("zeta-llm-response-cache"));
pub static REPOS_DIR: LazyLock<PathBuf> = LazyLock::new(|| TARGET_DIR.join("zeta-repos"));
pub static WORKTREES_DIR: LazyLock<PathBuf> = LazyLock::new(|| TARGET_DIR.join("zeta-worktrees"));
pub static LOGS_DIR: LazyLock<PathBuf> = LazyLock::new(|| TARGET_DIR.join("zeta-logs"));
pub static LOGS_SEARCH_PROMPT: LazyLock<PathBuf> =
LazyLock::new(|| LOGS_DIR.join("search_prompt.md"));
pub static LOGS_SEARCH_QUERIES: LazyLock<PathBuf> =
LazyLock::new(|| LOGS_DIR.join("search_queries.json"));
pub static LOGS_PREDICTION_PROMPT: LazyLock<PathBuf> =
LazyLock::new(|| LOGS_DIR.join("prediction_prompt.md"));
pub static LOGS_PREDICTION_RESPONSE: LazyLock<PathBuf> =
LazyLock::new(|| LOGS_DIR.join("prediction_response.md"));
pub static TARGET_ZETA_DIR: LazyLock<PathBuf> =
LazyLock::new(|| env::current_dir().unwrap().join("target/zeta"));
pub static CACHE_DIR: LazyLock<PathBuf> = LazyLock::new(|| TARGET_ZETA_DIR.join("cache"));
pub static REPOS_DIR: LazyLock<PathBuf> = LazyLock::new(|| TARGET_ZETA_DIR.join("repos"));
pub static WORKTREES_DIR: LazyLock<PathBuf> = LazyLock::new(|| TARGET_ZETA_DIR.join("worktrees"));
pub static RUN_DIR: LazyLock<PathBuf> = LazyLock::new(|| {
TARGET_ZETA_DIR
.join("runs")
.join(chrono::Local::now().format("%d-%m-%y-%H_%M_%S").to_string())
});
pub static LATEST_EXAMPLE_RUN_DIR: LazyLock<PathBuf> =
LazyLock::new(|| TARGET_ZETA_DIR.join("latest"));

pub fn print_run_data_dir() {
println!("\n## Run Data\n");

let current_dir = std::env::current_dir().unwrap();
for file in std::fs::read_dir(&*RUN_DIR).unwrap() {
let file = file.unwrap();
if file.file_type().unwrap().is_dir() {
for file in std::fs::read_dir(file.path()).unwrap() {
let path = file.unwrap().path();
let path = path.strip_prefix(&current_dir).unwrap_or(&path);
println!(
"- {}/\x1b[34m{}\x1b[0m",
path.parent().unwrap().display(),
path.file_name().unwrap().display(),
);
}
} else {
let path = file.path();
println!(
"- {} ",
path.strip_prefix(&current_dir).unwrap_or(&path).display()
);
}
}
}
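
For orientation: given the `%d-%m-%y-%H_%M_%S` format above, a run started at 14:30:05 on 9 February 2026 (a hypothetical timestamp) writes under `target/zeta/runs/09-02-26-14_30_05/`, with one subdirectory per example (created in `zeta2_predict`, later in this diff) and `target/zeta/latest` linking to the most recent example's directory.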

@@ -1,20 +1,15 @@
use crate::PromptFormat;
use crate::example::{ActualExcerpt, ExpectedExcerpt, NamedExample};
use crate::headless::ZetaCliAppState;
use crate::paths::{
CACHE_DIR, LOGS_DIR, LOGS_PREDICTION_PROMPT, LOGS_PREDICTION_RESPONSE, LOGS_SEARCH_PROMPT,
LOGS_SEARCH_QUERIES,
};
use crate::paths::{CACHE_DIR, LATEST_EXAMPLE_RUN_DIR, RUN_DIR, print_run_data_dir};
use ::serde::Serialize;
use anyhow::{Result, anyhow};
use clap::Args;
use collections::HashMap;
use gpui::http_client::Url;
use language::{Anchor, Buffer, Point};
// use cloud_llm_client::predict_edits_v3::PromptFormat;
use anyhow::{Context, Result, anyhow};
use clap::{Args, ValueEnum};
use cloud_zeta2_prompt::{CURSOR_MARKER, write_codeblock};
use collections::HashMap;
use futures::StreamExt as _;
use gpui::{AppContext, AsyncApp, Entity};
use language::{Anchor, Buffer, Point};
use project::Project;
use serde::Deserialize;
use std::cell::Cell;

@@ -25,7 +20,7 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::{Duration, Instant};
use zeta2::LlmResponseCache;
use zeta2::{EvalCache, EvalCacheEntryKind, EvalCacheKey};

#[derive(Debug, Args)]
pub struct PredictArguments {

@@ -36,8 +31,31 @@ pub struct PredictArguments {
#[clap(long, short, value_enum, default_value_t = PredictionsOutputFormat::Md)]
format: PredictionsOutputFormat,
example_path: PathBuf,
#[clap(long)]
skip_cache: bool,
#[clap(long, value_enum, default_value_t = CacheMode::default())]
cache: CacheMode,
}

#[derive(Debug, ValueEnum, Default, Clone, Copy)]
pub enum CacheMode {
/// Use cached LLM requests and responses, based on the hash of the prompt and the endpoint.
#[default]
#[value(alias = "request")]
Requests,
/// Ignore existing cache entries for both LLM and search.
Skip,
/// Use cached LLM responses AND search results for full determinism. Fails if they haven't been cached yet.
/// Useful for reproducing results and fixing bugs outside of search queries
Force,
}

impl CacheMode {
fn use_cached_llm_responses(&self) -> bool {
matches!(self, CacheMode::Requests | CacheMode::Force)
}

fn use_cached_search_results(&self) -> bool {
matches!(self, CacheMode::Force)
}
}
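
Spelled out, the two predicates give each mode the following behavior; the asserts below are a sanity-check sketch derived from the `matches!` arms above (they would have to live inside this module, e.g. in a unit test, since the predicates are private):

```rust
// Mode                 cached LLM responses   cached search results
// Requests (default)   yes                    no
// Skip                 no                     no
// Force                yes                    yes
assert!(CacheMode::Requests.use_cached_llm_responses());
assert!(!CacheMode::Requests.use_cached_search_results());
assert!(!CacheMode::Skip.use_cached_llm_responses());
assert!(!CacheMode::Skip.use_cached_search_results());
assert!(CacheMode::Force.use_cached_llm_responses());
assert!(CacheMode::Force.use_cached_search_results());
```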

#[derive(clap::ValueEnum, Debug, Clone)]

@@ -55,9 +73,9 @@ pub async fn run_zeta2_predict(
let example = NamedExample::load(args.example_path).unwrap();
let result = zeta2_predict(
example,
args.skip_cache,
args.prompt_format,
args.use_expected_context,
args.cache,
&app_state,
cx,
)

@@ -65,14 +83,7 @@ pub async fn run_zeta2_predict(
.unwrap();
result.write(args.format, std::io::stdout()).unwrap();

println!("## Logs\n");
println!("Search prompt: {}", LOGS_SEARCH_PROMPT.display());
println!("Search queries: {}", LOGS_SEARCH_QUERIES.display());
println!("Prediction prompt: {}", LOGS_PREDICTION_PROMPT.display());
println!(
"Prediction response: {}",
LOGS_PREDICTION_RESPONSE.display()
);
print_run_data_dir();
}

thread_local! {

@@ -81,13 +92,12 @@ thread_local! {

pub async fn zeta2_predict(
example: NamedExample,
skip_cache: bool,
prompt_format: PromptFormat,
use_expected_context: bool,
cache_mode: CacheMode,
app_state: &Arc<ZetaCliAppState>,
cx: &mut AsyncApp,
) -> Result<PredictionDetails> {
fs::create_dir_all(&*LOGS_DIR)?;
let worktree_path = example.setup_worktree().await?;

if !AUTHENTICATED.get() {

@@ -126,8 +136,25 @@ pub async fn zeta2_predict(

let zeta = cx.update(|cx| zeta2::Zeta::global(&app_state.client, &app_state.user_store, cx))?;

let example_run_dir = RUN_DIR.join(&example.file_name());
fs::create_dir_all(&example_run_dir)?;
if LATEST_EXAMPLE_RUN_DIR.exists() {
fs::remove_file(&*LATEST_EXAMPLE_RUN_DIR)?;
}

#[cfg(unix)]
std::os::unix::fs::symlink(&example_run_dir, &*LATEST_EXAMPLE_RUN_DIR)
.context("creating latest link")?;

#[cfg(windows)]
std::os::windows::fs::symlink_dir(&example_run_dir, &*LATEST_EXAMPLE_RUN_DIR)
.context("creating latest link")?;

zeta.update(cx, |zeta, _cx| {
zeta.with_llm_response_cache(Arc::new(Cache { skip_cache }));
zeta.with_eval_cache(Arc::new(RunCache {
example_run_dir: example_run_dir.clone(),
cache_mode,
}));
})?;

cx.subscribe(&buffer_store, {

@@ -159,12 +186,15 @@ pub async fn zeta2_predict(
match event {
zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => {
start_time = Some(info.timestamp);
fs::write(&*LOGS_SEARCH_PROMPT, &info.search_prompt)?;
fs::write(
example_run_dir.join("search_prompt.md"),
&info.search_prompt,
)?;
}
zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => {
search_queries_generated_at = Some(info.timestamp);
fs::write(
&*LOGS_SEARCH_QUERIES,
example_run_dir.join("search_queries.json"),
serde_json::to_string_pretty(&info.search_queries).unwrap(),
)?;
}

@@ -176,7 +206,7 @@ pub async fn zeta2_predict(
let prediction_started_at = Instant::now();
start_time.get_or_insert(prediction_started_at);
fs::write(
&*LOGS_PREDICTION_PROMPT,
example_run_dir.join("prediction_prompt.md"),
&request.local_prompt.unwrap_or_default(),
)?;

@@ -210,7 +240,7 @@ pub async fn zeta2_predict(
let response = request.response_rx.await?.0.map_err(|err| anyhow!(err))?;
let response = zeta2::text_from_response(response).unwrap_or_default();
let prediction_finished_at = Instant::now();
fs::write(&*LOGS_PREDICTION_RESPONSE, &response)?;
fs::write(example_run_dir.join("prediction_response.md"), &response)?;

let mut result = result.lock().unwrap();

@@ -328,48 +358,69 @@ async fn resolve_context_entry(
Ok((buffer, ranges))
}

struct Cache {
skip_cache: bool,
struct RunCache {
cache_mode: CacheMode,
example_run_dir: PathBuf,
}

impl Cache {
fn path(key: u64) -> PathBuf {
CACHE_DIR.join(format!("{key:x}.json"))
impl RunCache {
fn output_cache_path((kind, key): &EvalCacheKey) -> PathBuf {
CACHE_DIR.join(format!("{kind}_out_{key:x}.json",))
}

fn input_cache_path((kind, key): &EvalCacheKey) -> PathBuf {
CACHE_DIR.join(format!("{kind}_in_{key:x}.json",))
}

fn link_to_run(&self, key: &EvalCacheKey) {
let output_link_path = self.example_run_dir.join(format!("{}_out.json", key.0));
fs::hard_link(Self::output_cache_path(key), &output_link_path).unwrap();

let input_link_path = self.example_run_dir.join(format!("{}_in.json", key.0));
fs::hard_link(Self::input_cache_path(key), &input_link_path).unwrap();
}
}

impl LlmResponseCache for Cache {
fn get_key(&self, url: &Url, body: &str) -> u64 {
use collections::FxHasher;
use std::hash::{Hash, Hasher};
impl EvalCache for RunCache {
fn read(&self, key: EvalCacheKey) -> Option<String> {
let path = RunCache::output_cache_path(&key);

let mut hasher = FxHasher::default();
url.hash(&mut hasher);
body.hash(&mut hasher);
hasher.finish()
}

fn read_response(&self, key: u64) -> Option<String> {
let path = Cache::path(key);
if path.exists() {
if self.skip_cache {
log::info!("Skipping existing cached LLM response: {}", path.display());
None
} else {
log::info!("Using LLM response from cache: {}", path.display());
let use_cache = match key.0 {
EvalCacheEntryKind::Search => self.cache_mode.use_cached_search_results(),
EvalCacheEntryKind::Context | EvalCacheEntryKind::Prediction => {
self.cache_mode.use_cached_llm_responses()
}
};
if use_cache {
log::info!("Using cache entry: {}", path.display());
self.link_to_run(&key);
Some(fs::read_to_string(path).unwrap())
} else {
log::info!("Skipping cached entry: {}", path.display());
None
}
} else if matches!(self.cache_mode, CacheMode::Force) {
panic!(
"No cached entry found for {:?}. Run without `--cache force` at least once.",
key.0
);
} else {
None
}
}

fn write_response(&self, key: u64, value: &str) {
fn write(&self, key: EvalCacheKey, input: &str, output: &str) {
fs::create_dir_all(&*CACHE_DIR).unwrap();

let path = Cache::path(key);
log::info!("Writing LLM response to cache: {}", path.display());
fs::write(path, value).unwrap();
let input_path = RunCache::input_cache_path(&key);
fs::write(&input_path, input).unwrap();

let output_path = RunCache::output_cache_path(&key);
log::info!("Writing cache entry: {}", output_path.display());
fs::write(&output_path, output).unwrap();

self.link_to_run(&key);
}
}
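
Concretely: with the `Display` impl from earlier in this diff, a prediction entry whose hash is, say, `3f2a90` (hypothetical) lives at `target/zeta/cache/prediction_out_3f2a90.json`, with its prompt beside it as `prediction_in_3f2a90.json`; `link_to_run` then hard-links the pair into the example's run directory as `prediction_out.json` and `prediction_in.json`, so each run keeps a stable snapshot even if the shared cache is cleaned later.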

@@ -3586,6 +3586,7 @@ List of `integer` column numbers
"option_as_meta": false,
"button": true,
"shell": "system",
"scroll_multiplier": 3.0,
"toolbar": {
"breadcrumbs": false
},

@@ -3998,6 +3999,26 @@ Disable with:
}
```

### Terminal: Scroll Multiplier

- Description: The multiplier for scrolling speed in the terminal when using mouse wheel or trackpad.
- Setting: `scroll_multiplier`
- Default: `1.0`

**Options**

Positive floating point values. Values less than or equal to 0 will be clamped to a minimum of 0.01.

**Example**

```json
{
"terminal": {
"scroll_multiplier": 5.0
}
}
```

## Terminal: Toolbar

- Description: Whether or not to show various elements in the terminal toolbar.

@@ -206,7 +206,7 @@ If you are struggling with connection issues, you should be able to see more inf

## Supported SSH Options

Under the hood, Zed shells out to the `ssh` binary to connect to the remote server. We create one SSH control master per project, and use then use that to multiplex SSH connections for the Zed protocol itself, any terminals you open and tasks you run. We read settings from your SSH config file, but if you want to specify additional options to the SSH control master you can configure Zed to set them.
Under the hood, Zed shells out to the `ssh` binary to connect to the remote server. We create one SSH control master per project, and then use that to multiplex SSH connections for the Zed protocol itself, any terminals you open and tasks you run. We read settings from your SSH config file, but if you want to specify additional options to the SSH control master you can configure Zed to set them.

When typing in the "Connect New Server" dialog, you can use bash-style quoting to pass options containing a space. Once you have created a server it will be added to the `"ssh_connections": []` array in your settings file. You can edit the settings file directly to make changes to SSH connections.
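
For example, entering something like `me@example.com -o "ServerAliveInterval 60"` (a hypothetical host) keeps the quoted option together as a single argument to `ssh`.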