Compare commits
16 commits
git-clone ... split-quer
| Author | SHA1 | Date |
|---|---|---|
| | 3a9e53d561 | |
| | 7ee2511b89 | |
| | 1eb90e0400 | |
| | 72e73544bb | |
| | 586cf40d63 | |
| | 18f2fc9aa3 | |
| | b1beed4ca9 | |
| | 8d37c1c6ea | |
| | 0680f6469b | |
| | 57a543ebe8 | |
| | d69fb469bd | |
| | eb413ba404 | |
| | 9182bd51f2 | |
| | 481a554923 | |
| | c129e3b668 | |
| | 12e3c3b502 | |
@@ -397,7 +397,13 @@
             // 3. "gpt-4-turbo-preview"
             // 4. "gpt-4o"
             "default_model": "gpt-4o"
-        }
+        },
+        // Whether to enable the /auto command in the assistant panel, which infers context.
+        // Enabling this also enables indexing all the files in the project to generate
+        // metadata used in context inference. The first time a project is indexed, indexing
+        // can take a long time and use a lot of system resources. Later indexing is incremental
+        // and much faster.
+        "infer_context": false
     },
     // Whether the screen sharing icon is shown in the os status bar.
     "show_call_status_icon": true,
@@ -26,8 +26,9 @@ use semantic_index::{CloudEmbeddingProvider, SemanticIndex};
 use serde::{Deserialize, Serialize};
 use settings::{Settings, SettingsStore};
 use slash_command::{
-    active_command, default_command, diagnostics_command, fetch_command, file_command, now_command,
-    project_command, prompt_command, rustdoc_command, search_command, tabs_command, term_command,
+    active_command, auto_command, default_command, diagnostics_command, fetch_command,
+    file_command, now_command, project_command, prompt_command, rustdoc_command, search_command,
+    tabs_command, term_command,
 };
 use std::{
     fmt::{self, Display},
@@ -138,7 +139,7 @@ impl LanguageModel {
     }
 }

-#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
+#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
 pub struct LanguageModelRequestMessage {
     pub role: Role,
     pub content: String,
@@ -159,7 +160,7 @@ impl LanguageModelRequestMessage {
     }
 }

-#[derive(Debug, Default, Serialize)]
+#[derive(Clone, Debug, Default, Serialize)]
 pub struct LanguageModelRequest {
     pub model: LanguageModel,
     pub messages: Vec<LanguageModelRequestMessage>,
@@ -310,6 +311,7 @@ pub fn init(fs: Arc<dyn Fs>, client: Arc<Client>, cx: &mut AppContext) {

 fn register_slash_commands(cx: &mut AppContext) {
     let slash_command_registry = SlashCommandRegistry::global(cx);
+    slash_command_registry.register_command(auto_command::AutoCommand, true);
     slash_command_registry.register_command(file_command::FileSlashCommand, true);
     slash_command_registry.register_command(active_command::ActiveSlashCommand, true);
     slash_command_registry.register_command(tabs_command::TabsSlashCommand, true);
@@ -225,6 +225,7 @@ pub struct AssistantSettings {
     pub default_width: Pixels,
     pub default_height: Pixels,
     pub provider: AssistantProvider,
+    pub infer_context: bool,
 }

 /// Assistant panel settings
@@ -282,6 +283,7 @@ impl AssistantSettingsContent {
                     }
                 })
             },
+            infer_context: None,
         },
     }
 }
@@ -381,6 +383,7 @@ impl Default for VersionedAssistantSettingsContent {
             default_width: None,
             default_height: None,
             provider: None,
+            infer_context: None,
         })
     }
 }
@@ -412,6 +415,14 @@ pub struct AssistantSettingsContentV1 {
     /// This can either be the internal `zed.dev` service or an external `openai` service,
     /// each with their respective default models and configurations.
     provider: Option<AssistantProviderContent>,
+    /// When using the assistant panel, enable the /auto command to automatically
+    /// infer context. Enabling this will enable background indexing of the project,
+    /// to generate the metadata /auto needs to infer context. The first time a project
+    /// is indexed, the indexing process can take a long time and use a lot of system
+    /// resources. After the first time, later indexing is incremental and much faster.
+    ///
+    /// Default: false
+    infer_context: Option<bool>,
 }

 #[derive(Clone, Serialize, Deserialize, JsonSchema, Debug)]
@@ -466,6 +477,7 @@ impl Settings for AssistantSettings {
             &mut settings.default_height,
             value.default_height.map(Into::into),
         );
+        merge(&mut settings.infer_context, value.infer_context);
         if let Some(provider) = value.provider.clone() {
             match (&mut settings.provider, provider) {
                 (
@@ -18,6 +18,7 @@ use ui::ActiveTheme;
 use workspace::Workspace;

 pub mod active_command;
+pub mod auto_command;
 pub mod default_command;
 pub mod diagnostics_command;
 pub mod fetch_command;
crates/assistant/src/slash_command/auto_command.rs (new file, 242 lines)
@@ -0,0 +1,242 @@
use super::create_label_for_command;
use super::{SlashCommand, SlashCommandOutput};
use crate::{CompletionProvider, LanguageModelRequest, LanguageModelRequestMessage, Role};
use anyhow::{anyhow, Result};
use futures::StreamExt;
use gpui::{AppContext, AsyncAppContext, Task, WeakView};
use language::{CodeLabel, LspAdapterDelegate};
use serde::{Deserialize, Serialize};
use std::sync::{atomic::AtomicBool, Arc};
use ui::WindowContext;
use workspace::Workspace;

pub(crate) struct AutoCommand;

impl SlashCommand for AutoCommand {
    fn name(&self) -> String {
        "auto".into()
    }

    fn description(&self) -> String {
        "Automatically infer what context to add, based on your prompt".into()
    }

    fn menu_text(&self) -> String {
        "Automatically Infer Context".into()
    }

    fn label(&self, cx: &AppContext) -> CodeLabel {
        create_label_for_command("auto", &["--prompt"], cx)
    }

    fn complete_argument(
        self: Arc<Self>,
        _query: String,
        _cancellation_flag: Arc<AtomicBool>,
        _workspace: Option<WeakView<Workspace>>,
        _cx: &mut AppContext,
    ) -> Task<Result<Vec<String>>> {
        // There's no autocomplete for a prompt, since it's arbitrary text.
        Task::ready(Ok(Vec::new()))
    }

    fn requires_argument(&self) -> bool {
        true
    }

    fn run(
        self: Arc<Self>,
        argument: Option<&str>,
        _workspace: WeakView<Workspace>,
        _delegate: Arc<dyn LspAdapterDelegate>,
        cx: &mut WindowContext,
    ) -> Task<Result<SlashCommandOutput>> {
        let Some(argument) = argument else {
            return Task::ready(Err(anyhow!("missing prompt")));
        };

        // to_string() is needed so it can live long enough to be used in cx.spawn
        let original_prompt = argument.to_string();
        let task = cx.spawn(|cx: gpui::AsyncWindowContext| async move {
            let summaries: Vec<FileSummary> = serde_json::from_str(SUMMARY).unwrap_or_else(|_| {
                // Since we generate the JSON ourselves, this parsing should never fail. If it does, that's a bug.
                log::error!("JSON parsing of project file summaries failed");

                // Handle this gracefully by not including any summaries. Assistant results
                // will be worse than if we actually had summaries, but we won't block the user.
                Vec::new()
            });

            commands_for_summaries(&summaries, &original_prompt, &cx).await
        });

        // As a convenience, append /auto's argument to the end of the prompt
        // so you don't have to write it again.
        let original_prompt = argument.to_string();

        cx.background_executor().spawn(async move {
            let commands = task.await?;
            let mut prompt = String::new();

            log::info!(
                "Translating this response into slash-commands: {:?}",
                commands
            );

            for command in commands {
                prompt.push('/');
                prompt.push_str(&command.name);
                prompt.push(' ');
                prompt.push_str(&command.arg);
                prompt.push('\n');
            }

            prompt.push('\n');
            prompt.push_str(&original_prompt);

            Ok(SlashCommandOutput {
                text: prompt,
                sections: Vec::new(),
                run_commands_in_text: true,
            })
        })
    }
}

const PROMPT_INSTRUCTIONS_BEFORE_SUMMARY: &str = include_str!("prompt_before_summary.txt");
const PROMPT_INSTRUCTIONS_AFTER_SUMMARY: &str = include_str!("prompt_after_summary.txt");
const SUMMARY: &str = include_str!("/Users/rtfeldman/code/summarize-dir/combined_summaries.json");

#[derive(Serialize, Deserialize)]
struct FileSummary {
    filename: String,
    summary: String,
}

fn summaries_prompt(summaries: &[FileSummary], original_prompt: &str) -> String {
    let json_summaries = serde_json::to_string(summaries).unwrap();

    format!("{PROMPT_INSTRUCTIONS_BEFORE_SUMMARY}\n{json_summaries}\n{PROMPT_INSTRUCTIONS_AFTER_SUMMARY}\n{original_prompt}")
}

/// The slash commands that the model is told about, and which we look for in the inference response.
const SUPPORTED_SLASH_COMMANDS: &[&str] = &["search", "file"];

#[derive(Debug, Clone)]
struct CommandToRun {
    name: String,
    arg: String,
}

/// Given the pre-indexed file summaries for this project, as well as the original prompt
/// string passed to `/auto`, get a list of slash commands to run, along with their arguments.
///
/// The prompt's output does not include the slashes (to reduce the chance that it makes a mistake),
/// so taking one of these returned Strings and turning it into a real slash-command-with-argument
/// involves prepending a slash to it.
///
/// This function will validate that each of the returned lines begins with one of SUPPORTED_SLASH_COMMANDS.
/// Any other lines it encounters will be discarded, with a warning logged.
async fn commands_for_summaries(
    summaries: &[FileSummary],
    original_prompt: &str,
    cx: &AsyncAppContext,
) -> Result<Vec<CommandToRun>> {
    if summaries.is_empty() {
        return Ok(Vec::new());
    }

    let model = cx.update(|cx| CompletionProvider::global(cx).model())?;
    let max_token_count = model.max_token_count();

    // Rather than recursing (which would require this async function use a pinned box),
    // we use an explicit stack of arguments and answers for when we need to "recurse."
    let mut stack = vec![(summaries, String::new())];
    let mut final_response = Vec::new();

    while let Some((current_summaries, mut accumulated_response)) = stack.pop() {
        // The split can result in one slice being empty and the other having one element.
        // Whenever that happens, skip the empty one.
        if current_summaries.is_empty() {
            continue;
        }

        log::info!(
            "Inferring prompt context using {} file summaries",
            current_summaries.len()
        );

        let request = LanguageModelRequest {
            model: model.clone(),
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: summaries_prompt(&current_summaries, original_prompt),
            }],
            stop: Vec::new(),
            temperature: 1.0,
        };

        let token_count = cx
            .update(|cx| CompletionProvider::global(cx).count_tokens(request.clone(), cx))?
            .await?;

        if token_count < max_token_count {
            let mut response_chunks = cx
                .update(|cx| CompletionProvider::global(cx).complete(request))?
                .await?;

            while let Some(chunk) = response_chunks.next().await {
                accumulated_response.push_str(&chunk?);
            }

            for line in accumulated_response.split('\n') {
                if let Some(first_space) = line.find(' ') {
                    let command = &line[..first_space].trim();
                    let arg = &line[first_space..].trim();

                    // Don't return empty or duplicate commands
                    if !command.is_empty()
                        && !final_response
                            .iter()
                            .any(|cmd: &CommandToRun| cmd.name == *command && cmd.arg == *arg)
                    {
                        if SUPPORTED_SLASH_COMMANDS
                            .iter()
                            .any(|supported| command == supported)
                        {
                            final_response.push(CommandToRun {
                                name: command.to_string(),
                                arg: arg.to_string(),
                            });
                        } else {
                            log::warn!(
                                "Context inference returned an unrecognized slash-command line: {:?}",
                                line
                            );
                        }
                    }
                } else if !line.trim().is_empty() {
                    // All slash-commands currently supported in context inference need a space for the argument.
                    log::warn!(
                        "Context inference returned a non-blank line that contained no spaces (meaning no argument for the slash-command): {:?}",
                        line
                    );
                }
            }
        } else if current_summaries.len() == 1 {
            log::warn!("Inferring context for a single file's summary failed because the prompt's token length exceeded the model's token limit.");
        } else {
            log::info!(
                "Context inference using file summaries resulted in a prompt containing {token_count} tokens, which exceeded the model's max of {max_token_count}. Retrying as two separate prompts, each including half the number of summaries.",
            );
            let (left, right) = current_summaries.split_at(current_summaries.len() / 2);
            stack.push((right, accumulated_response.clone()));
            stack.push((left, accumulated_response));
        }
    }

    // Sort the commands by name (reversed just so that /search appears before /file)
    final_response.sort_by(|cmd1, cmd2| cmd1.name.cmp(&cmd2.name).reverse());

    Ok(final_response)
}
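One detail of `commands_for_summaries` worth isolating is the token-limit fallback: rather than recursing (which would force this async function's future into a pinned box), it drives an explicit stack and, whenever a prompt is too large, retries with each half of the summaries. Below is a minimal, self-contained sketch of just that control flow; `try_prompt` is a hypothetical stand-in for the real token-count check plus model round trip, not an API from this diff.

```rust
// Sketch of the explicit-stack "recursion" used by commands_for_summaries.
// try_prompt stands in for the token-count check plus completion request;
// here it simply fails whenever the batch of summaries is "too large".
fn try_prompt(summaries: &[&str]) -> Result<String, ()> {
    if summaries.len() > 2 {
        Err(()) // pretend this batch exceeds the model's token limit
    } else {
        Ok(format!("response for {summaries:?}"))
    }
}

fn run_with_splitting(summaries: &[&str]) -> Vec<String> {
    let mut stack = vec![summaries];
    let mut responses = Vec::new();

    while let Some(current) = stack.pop() {
        // Splitting can leave one half empty; skip it.
        if current.is_empty() {
            continue;
        }
        match try_prompt(current) {
            Ok(response) => responses.push(response),
            // A single summary that still doesn't fit can't be split further.
            Err(()) if current.len() == 1 => {
                eprintln!("one summary alone exceeds the token limit; skipping it");
            }
            // Too big: retry as two prompts, each with half the summaries.
            Err(()) => {
                let (left, right) = current.split_at(current.len() / 2);
                stack.push(right);
                stack.push(left);
            }
        }
    }

    responses
}

fn main() {
    let summaries = ["a.rs", "b.rs", "c.rs", "d.rs", "e.rs"];
    for response in run_with_splitting(&summaries) {
        println!("{response}");
    }
}
```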
crates/assistant/src/slash_command/prompt_after_summary.txt (new file, 24 lines)
@@ -0,0 +1,24 @@
Actions have a cost, so only include actions that you think
will be helpful to you in doing a great job answering the
prompt in the future.

You must respond ONLY with a list of actions you would like to
perform. Each action should be on its own line, and followed by a space and then its parameter.

Actions can be performed more than once with different parameters.
Here is an example valid response:

```
file path/to/my/file.txt
file path/to/another/file.txt
search something to search for
search something else to search for
```

Once again, do not forget: you must respond ONLY in the format of
one action per line, and the action name should be followed by
its parameter. Your response must not include anything other
than a list of actions, with one action per line, in this format.
It is extremely important that you do not deviate from this format even slightly!

This is the end of my instructions for how to respond. The rest is the prompt:
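The line-oriented format this file demands is exactly what `commands_for_summaries` parses back out: everything up to the first space is the action name, the remainder is its parameter, and unsupported or malformed lines are discarded with a warning. A self-contained sketch of that parsing contract follows; `parse_action_line` is an illustrative helper written for this note, not a function from the diff.

```rust
// Illustrative parser for the one-action-per-line response format:
// the action name runs to the first space, the rest is its parameter.
fn parse_action_line(line: &str) -> Option<(&str, &str)> {
    let first_space = line.find(' ')?;
    let command = line[..first_space].trim();
    let arg = line[first_space..].trim();
    (!command.is_empty() && !arg.is_empty()).then_some((command, arg))
}

fn main() {
    let supported = ["search", "file"];
    let response = "\
file path/to/my/file.txt
search something to search for
this-line-has-no-space";

    for line in response.lines() {
        match parse_action_line(line) {
            // Turning a parsed action into a real slash command just means
            // prepending a slash (the model is never asked to emit slashes).
            Some((command, arg)) if supported.contains(&command) => {
                println!("/{command} {arg}")
            }
            _ => eprintln!("discarded: {line:?}"),
        }
    }
}
```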
crates/assistant/src/slash_command/prompt_before_summary.txt (new file, 31 lines)
@@ -0,0 +1,31 @@
I'm going to give you a prompt. I don't want you to respond
to the prompt itself. I want you to figure out which of the following
actions on my project, if any, would help you answer the prompt.

Here are the actions:

## file

This action's parameter is a file path to one of the files
in the project. If you ask for this action, I will tell you
the full contents of the file, so you can learn all the
details of the file.

## search

This action's parameter is a string to do a semantic search for
across the files in the project. (You will have a JSON summary
of all the files in the project.) It will tell you which files this string
(or similar strings; it is a semantic search) appears in,
as well as some context of the lines surrounding each result.
It's very important that you only use this action when you think
that searching across the specific files in this project for the query
in question will be useful. For example, don't use this command to search
for queries you might put into a general Web search engine, because those
will be too general to give useful results in this project-specific search.

---

That was the end of the list of actions.

Here is a JSON summary of each of the files in my project:
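For orientation: these two text files bracket the JSON file summaries, and `summaries_prompt` in auto_command.rs concatenates them with the user's original prompt. A toy version of that assembly is sketched below, assuming serde (with the derive feature) and serde_json as dependencies; the prompt constants and summary data are invented placeholders, not the real file contents.

```rust
use serde::Serialize;

// Stand-ins for the two include_str!'d prompt files.
const BEFORE: &str = "I'm going to give you a prompt. [...]";
const AFTER: &str = "Actions have a cost, [...]";

#[derive(Serialize)]
struct FileSummary {
    filename: String,
    summary: String,
}

fn main() {
    let summaries = vec![FileSummary {
        filename: "src/main.rs".into(),
        summary: "Entry point; parses CLI args.".into(), // invented example data
    }];
    let json_summaries = serde_json::to_string(&summaries).unwrap();
    let original_prompt = "Why does startup take so long?";

    // Same shape as summaries_prompt in auto_command.rs: instructions,
    // JSON summaries, more instructions, then the user's original prompt.
    let prompt = format!("{BEFORE}\n{json_summaries}\n{AFTER}\n{original_prompt}");
    println!("{prompt}");
}
```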