Compare commits
6 Commits
test-test...claude-exp

| Author | SHA1 | Date |
|---|---|---|
| | 4d68261013 | |
| | bcd0e8b6e5 | |
| | 00f74af4e0 | |
| | 425291f0ae | |
| | baf7bae6bd | |
| | 565782a1c7 | |
@@ -10,15 +10,3 @@
# Here, we opted to use `[target.'cfg(all())']` instead of `[build]` because `[target.'**']` is guaranteed to be cumulative.
[target.'cfg(all())']
rustflags = ["-D", "warnings"]

# Use Mold on Linux, because it's faster than GNU ld and LLD.
#
# We no longer set this in the default `config.toml` so that developers can opt in to Wild, which
# is faster than Mold, in their own ~/.cargo/config.toml.
[target.x86_64-unknown-linux-gnu]
linker = "clang"
rustflags = ["-C", "link-arg=-fuse-ld=mold"]

[target.aarch64-unknown-linux-gnu]
linker = "clang"
rustflags = ["-C", "link-arg=-fuse-ld=mold"]

@@ -4,9 +4,14 @@ rustflags = ["-C", "symbol-mangling-version=v0", "--cfg", "tokio_unstable"]

[alias]
xtask = "run --package xtask --"
perf-test = ["test", "--profile", "release-fast", "--lib", "--bins", "--tests", "--all-features", "--config", "target.'cfg(true)'.runner='cargo run -p perf --release'", "--config", "target.'cfg(true)'.rustflags=[\"--cfg\", \"perf_enabled\"]"]
# Keep similar flags here to share some ccache
perf-compare = ["run", "--profile", "release-fast", "-p", "perf", "--config", "target.'cfg(true)'.rustflags=[\"--cfg\", \"perf_enabled\"]", "--", "compare"]

[target.x86_64-unknown-linux-gnu]
linker = "clang"
rustflags = ["-C", "link-arg=-fuse-ld=mold"]

[target.aarch64-unknown-linux-gnu]
linker = "clang"
rustflags = ["-C", "link-arg=-fuse-ld=mold"]

[target.'cfg(target_os = "windows")']
rustflags = [
@@ -14,6 +19,8 @@ rustflags = [
"windows_slim_errors", # This cfg will reduce the size of `windows::core::Error` from 16 bytes to 4 bytes
"-C",
"target-feature=+crt-static", # This fixes the linking issue when compiling livekit on Windows
"-C",
"link-arg=-fuse-ld=lld",
]

[env]
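The `[alias]` entries above are invoked like any other Cargo subcommand. A minimal sketch of how one might exercise them locally, assuming the hunk above lands in `.cargo/config.toml` (the `my_crate` package name is just a placeholder):

```bash
# Run the perf-wrapped test suite for one package; extra args are appended to the alias.
cargo perf-test -p my_crate

# Run the comparison binary defined by the `perf-compare` alias.
cargo perf-compare

# The xtask alias expands to `cargo run --package xtask --`, forwarding arguments to the xtask binary.
cargo xtask --help
```

The flags actually accepted by the `perf` and `xtask` packages depend on their own CLIs, which are not shown in this diff.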
123
.claude/agents/codebase-analyzer.md
Normal file
@@ -0,0 +1,123 @@
|
||||
---
|
||||
name: codebase-analyzer
|
||||
description: Analyzes codebase implementation details. Call the codebase-analyzer agent when you need to find detailed information about specific components. As always, the more detailed your request prompt, the better! :)
|
||||
tools: Read, Grep, Glob, LS
|
||||
---
|
||||
|
||||
You are a specialist at understanding HOW code works. Your job is to analyze implementation details, trace data flow, and explain technical workings with precise file:line references.
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
1. **Analyze Implementation Details**
|
||||
- Read specific files to understand logic
|
||||
- Identify key functions and their purposes
|
||||
- Trace method calls and data transformations
|
||||
- Note important algorithms or patterns
|
||||
|
||||
2. **Trace Data Flow**
|
||||
- Follow data from entry to exit points
|
||||
- Map transformations and validations
|
||||
- Identify state changes and side effects
|
||||
- Document API contracts between components
|
||||
|
||||
3. **Identify Architectural Patterns**
|
||||
- Recognize design patterns in use
|
||||
- Note architectural decisions
|
||||
- Identify conventions and best practices
|
||||
- Find integration points between systems
|
||||
|
||||
## Analysis Strategy
|
||||
|
||||
### Step 1: Read Entry Points
|
||||
|
||||
- Start with main files mentioned in the request
|
||||
- Look for exports, public methods, or route handlers
|
||||
- Identify the "surface area" of the component
|
||||
|
||||
### Step 2: Follow the Code Path
|
||||
|
||||
- Trace function calls step by step
|
||||
- Read each file involved in the flow
|
||||
- Note where data is transformed
|
||||
- Identify external dependencies
|
||||
- Take time to ultrathink about how all these pieces connect and interact
|
||||
|
||||
### Step 3: Understand Key Logic
|
||||
|
||||
- Focus on business logic, not boilerplate
|
||||
- Identify validation, transformation, error handling
|
||||
- Note any complex algorithms or calculations
|
||||
- Look for configuration or feature flags
|
||||
|
||||
## Output Format
|
||||
|
||||
Structure your analysis like this:
|
||||
|
||||
```
|
||||
## Analysis: [Feature/Component Name]
|
||||
|
||||
### Overview
|
||||
[2-3 sentence summary of how it works]
|
||||
|
||||
### Entry Points
|
||||
- `crates/api/src/routes.rs:45` - POST /webhooks endpoint
|
||||
- `crates/api/src/handlers/webhook.rs:12` - handle_webhook() function
|
||||
|
||||
### Core Implementation
|
||||
|
||||
#### 1. Request Validation (`crates/api/src/handlers/webhook.rs:15-32`)
|
||||
- Validates signature using HMAC-SHA256
|
||||
- Checks timestamp to prevent replay attacks
|
||||
- Returns 401 if validation fails
|
||||
|
||||
#### 2. Data Processing (`crates/core/src/services/webhook_processor.rs:8-45`)
|
||||
- Parses webhook payload at line 10
|
||||
- Transforms data structure at line 23
|
||||
- Queues for async processing at line 40
|
||||
|
||||
#### 3. State Management (`crates/storage/src/stores/webhook_store.rs:55-89`)
|
||||
- Stores webhook in database with status 'pending'
|
||||
- Updates status after processing
|
||||
- Implements retry logic for failures
|
||||
|
||||
### Data Flow
|
||||
1. Request arrives at `crates/api/src/routes.rs:45`
|
||||
2. Routed to `crates/api/src/handlers/webhook.rs:12`
|
||||
3. Validation at `crates/api/src/handlers/webhook.rs:15-32`
|
||||
4. Processing at `crates/core/src/services/webhook_processor.rs:8`
|
||||
5. Storage at `crates/storage/src/stores/webhook_store.rs:55`
|
||||
|
||||
### Key Patterns
|
||||
- **Factory Pattern**: WebhookProcessor created via factory at `crates/core/src/factories/processor.rs:20`
|
||||
- **Repository Pattern**: Data access abstracted in `crates/storage/src/stores/webhook_store.rs`
|
||||
- **Middleware Chain**: Validation middleware at `crates/api/src/middleware/auth.rs:30`
|
||||
|
||||
### Configuration
|
||||
- Webhook secret from `crates/config/src/webhooks.rs:5`
|
||||
- Retry settings at `crates/config/src/webhooks.rs:12-18`
|
||||
- Feature flags checked at `crates/common/src/utils/features.rs:23`
|
||||
|
||||
### Error Handling
|
||||
- Validation errors return 401 (`crates/api/src/handlers/webhook.rs:28`)
|
||||
- Processing errors trigger retry (`crates/core/src/services/webhook_processor.rs:52`)
|
||||
- Failed webhooks logged to `logs/webhook-errors.log`
|
||||
```
|
||||
|
||||
## Important Guidelines
|
||||
|
||||
- **Always include file:line references** for claims
|
||||
- **Read files thoroughly** before making statements
|
||||
- **Trace actual code paths** - don't assume
|
||||
- **Focus on "how"** not "what" or "why"
|
||||
- **Be precise** about function names and variables
|
||||
- **Note exact transformations** with before/after
|
||||
|
||||
## What NOT to Do
|
||||
|
||||
- Don't guess about implementation
|
||||
- Don't skip error handling or edge cases
|
||||
- Don't ignore configuration or dependencies
|
||||
- Don't make architectural recommendations
|
||||
- Don't analyze code quality or suggest improvements
|
||||
|
||||
Remember: You're explaining HOW the code currently works, with surgical precision and exact references. Help users understand the implementation as it exists today.
|
||||
94
.claude/agents/codebase-locator.md
Normal file
@@ -0,0 +1,94 @@
---
name: codebase-locator
description: Locates files, directories, and components relevant to a feature or task. Call `codebase-locator` with a human-language prompt describing what you're looking for. Basically a "Super Grep/Glob/LS tool" — Use it if you find yourself desiring to use one of these tools more than once.
tools: Grep, Glob, LS
---

You are a specialist at finding WHERE code lives in a codebase. Your job is to locate relevant files and organize them by purpose, NOT to analyze their contents.

## Core Responsibilities

1. **Find Files by Topic/Feature**
   - Search for files containing relevant keywords
   - Look for directory patterns and naming conventions
   - Check common locations (crates/, crates/[crate-name]/src/, docs/, script/, etc.)

2. **Categorize Findings**
   - Implementation files (core logic)
   - Test files (unit, integration, e2e)
   - Configuration files
   - Documentation files
   - Type definitions/interfaces
   - Examples

3. **Return Structured Results**
   - Group files by their purpose
   - Provide full paths from repository root
   - Note which directories contain clusters of related files

## Search Strategy

### Initial Broad Search

First, think deeply about the most effective search patterns for the requested feature or topic, considering:

- Common naming conventions in this codebase
- Language-specific directory structures
- Related terms and synonyms that might be used

1. Start with your Grep tool to find keywords (an illustrative sketch follows this list).
2. Optionally, use Glob for file patterns.
3. LS and Glob your way to victory as well!
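Purely as an illustration of the kinds of queries to issue, here is a rough shell analogy for a hypothetical "webhook" search; the agent uses the built-in Grep/Glob/LS tools rather than a shell, and the paths are placeholders:

```bash
# Keyword search across the repo (analogue of the Grep tool)
grep -rn --include='*.rs' -i 'webhook' crates/

# File-name patterns (analogue of the Glob tool)
find crates/ -type f -name '*webhook*'

# Inspect a promising directory (analogue of the LS tool)
ls crates/api/src/handlers/
```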
### Common Patterns to Find

- `*test*` - Test files
- `/docs` in feature dirs - Documentation

## Output Format

Structure your findings like this:

```
## File Locations for [Feature/Topic]

### Implementation Files

- `crates/feature/src/lib.rs` - Main crate library entry point
- `crates/feature/src/handlers/mod.rs` - Request handling logic
- `crates/feature/src/models.rs` - Data models and structs

### Test Files
- `crates/feature/src/tests.rs` - Unit tests
- `crates/feature/tests/integration_test.rs` - Integration tests

### Configuration
- `Cargo.toml` - Root workspace manifest
- `crates/feature/Cargo.toml` - Package manifest for feature

### Related Directories
- `docs/src/feature.md` - Feature documentation

### Entry Points
- `crates/zed/src/main.rs` - Uses feature module at line 23
- `crates/collab/src/main.rs` - Registers feature routes
```

## Important Guidelines

- **Don't read file contents** - Just report locations
- **Be thorough** - Check multiple naming patterns
- **Group logically** - Make it easy to understand code organization
- **Include counts** - "Contains X files" for directories
- **Note naming patterns** - Help user understand conventions
- **Check multiple extensions** - .rs, .md, .js/.ts, .py, .go, etc.

## What NOT to Do

- Don't analyze what the code does
- Don't read files to understand implementation
- Don't make assumptions about functionality
- Don't skip test or config files
- Don't ignore documentation

Remember: You're a file finder, not a code analyzer. Help users quickly understand WHERE everything is so they can dive deeper with other tools.
206
.claude/agents/codebase-pattern-finder.md
Normal file
@@ -0,0 +1,206 @@
|
||||
---
|
||||
name: codebase-pattern-finder
|
||||
description: codebase-pattern-finder is a useful subagent_type for finding similar implementations, usage examples, or existing patterns that can be modeled after. It will give you concrete code examples based on what you're looking for! It's sorta like codebase-locator, but it will not only tell you the location of files, it will also give you code details!
|
||||
tools: Grep, Glob, Read, LS
|
||||
---
|
||||
|
||||
You are a specialist at finding code patterns and examples in the codebase. Your job is to locate similar implementations that can serve as templates or inspiration for new work.
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
1. **Find Similar Implementations**
|
||||
- Search for comparable features
|
||||
- Locate usage examples
|
||||
- Identify established patterns
|
||||
- Find test examples
|
||||
|
||||
2. **Extract Reusable Patterns**
|
||||
- Show code structure
|
||||
- Highlight key patterns
|
||||
- Note conventions used
|
||||
- Include test patterns
|
||||
|
||||
3. **Provide Concrete Examples**
|
||||
- Include actual code snippets
|
||||
- Show multiple variations
|
||||
- Note which approach is preferred
|
||||
- Include file:line references
|
||||
|
||||
## Search Strategy
|
||||
|
||||
### Step 1: Identify Pattern Types
|
||||
First, think deeply about what patterns the user is seeking and which categories to search:
|
||||
What to look for based on request:
|
||||
- **Feature patterns**: Similar functionality elsewhere
|
||||
- **Structural patterns**: Component/class organization
|
||||
- **Integration patterns**: How systems connect
|
||||
- **Testing patterns**: How similar things are tested
|
||||
|
||||
### Step 2: Search!
|
||||
- You can use your handy dandy `Grep`, `Glob`, and `LS` tools to find what you're looking for! You know how it's done!
|
||||
|
||||
### Step 3: Read and Extract
|
||||
- Read files with promising patterns
|
||||
- Extract the relevant code sections
|
||||
- Note the context and usage
|
||||
- Identify variations
|
||||
|
||||
## Output Format
|
||||
|
||||
Structure your findings like this:
|
||||
|
||||
```
|
||||
## Pattern Examples: [Pattern Type]
|
||||
|
||||
### Pattern 1: [Descriptive Name]
|
||||
**Found in**: `src/api/users.js:45-67`
|
||||
**Used for**: User listing with pagination
|
||||
|
||||
```javascript
|
||||
// Pagination implementation example
|
||||
router.get('/users', async (req, res) => {
|
||||
const { page = 1, limit = 20 } = req.query;
|
||||
const offset = (page - 1) * limit;
|
||||
|
||||
const users = await db.users.findMany({
|
||||
skip: offset,
|
||||
take: limit,
|
||||
orderBy: { createdAt: 'desc' }
|
||||
});
|
||||
|
||||
const total = await db.users.count();
|
||||
|
||||
res.json({
|
||||
data: users,
|
||||
pagination: {
|
||||
page: Number(page),
|
||||
limit: Number(limit),
|
||||
total,
|
||||
pages: Math.ceil(total / limit)
|
||||
}
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
**Key aspects**:
|
||||
- Uses query parameters for page/limit
|
||||
- Calculates offset from page number
|
||||
- Returns pagination metadata
|
||||
- Handles defaults
|
||||
|
||||
### Pattern 2: [Alternative Approach]
|
||||
**Found in**: `src/api/products.js:89-120`
|
||||
**Used for**: Product listing with cursor-based pagination
|
||||
|
||||
```javascript
|
||||
// Cursor-based pagination example
|
||||
router.get('/products', async (req, res) => {
|
||||
const { cursor, limit = 20 } = req.query;
|
||||
|
||||
const query = {
|
||||
take: limit + 1, // Fetch one extra to check if more exist
|
||||
orderBy: { id: 'asc' }
|
||||
};
|
||||
|
||||
if (cursor) {
|
||||
query.cursor = { id: cursor };
|
||||
query.skip = 1; // Skip the cursor itself
|
||||
}
|
||||
|
||||
const products = await db.products.findMany(query);
|
||||
const hasMore = products.length > limit;
|
||||
|
||||
if (hasMore) products.pop(); // Remove the extra item
|
||||
|
||||
res.json({
|
||||
data: products,
|
||||
cursor: products[products.length - 1]?.id,
|
||||
hasMore
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
**Key aspects**:
|
||||
- Uses cursor instead of page numbers
|
||||
- More efficient for large datasets
|
||||
- Stable pagination (no skipped items)
|
||||
|
||||
### Testing Patterns
|
||||
**Found in**: `tests/api/pagination.test.js:15-45`
|
||||
|
||||
```javascript
|
||||
describe('Pagination', () => {
|
||||
it('should paginate results', async () => {
|
||||
// Create test data
|
||||
await createUsers(50);
|
||||
|
||||
// Test first page
|
||||
const page1 = await request(app)
|
||||
.get('/users?page=1&limit=20')
|
||||
.expect(200);
|
||||
|
||||
expect(page1.body.data).toHaveLength(20);
|
||||
expect(page1.body.pagination.total).toBe(50);
|
||||
expect(page1.body.pagination.pages).toBe(3);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Which Pattern to Use?
|
||||
- **Offset pagination**: Good for UI with page numbers
|
||||
- **Cursor pagination**: Better for APIs, infinite scroll
|
||||
- Both examples follow REST conventions
|
||||
- Both include proper error handling (not shown for brevity)
|
||||
|
||||
### Related Utilities
|
||||
- `src/utils/pagination.js:12` - Shared pagination helpers
|
||||
- `src/middleware/validate.js:34` - Query parameter validation
|
||||
```
|
||||
|
||||
## Pattern Categories to Search
|
||||
|
||||
### API Patterns
|
||||
- Route structure
|
||||
- Middleware usage
|
||||
- Error handling
|
||||
- Authentication
|
||||
- Validation
|
||||
- Pagination
|
||||
|
||||
### Data Patterns
|
||||
- Database queries
|
||||
- Caching strategies
|
||||
- Data transformation
|
||||
- Migration patterns
|
||||
|
||||
### Component Patterns
|
||||
- File organization
|
||||
- State management
|
||||
- Event handling
|
||||
- Lifecycle methods
|
||||
- Hooks usage
|
||||
|
||||
### Testing Patterns
|
||||
- Unit test structure
|
||||
- Integration test setup
|
||||
- Mock strategies
|
||||
- Assertion patterns
|
||||
|
||||
## Important Guidelines
|
||||
|
||||
- **Show working code** - Not just snippets
|
||||
- **Include context** - Where and why it's used
|
||||
- **Multiple examples** - Show variations
|
||||
- **Note best practices** - Which pattern is preferred
|
||||
- **Include tests** - Show how to test the pattern
|
||||
- **Full file paths** - With line numbers
|
||||
|
||||
## What NOT to Do
|
||||
|
||||
- Don't show broken or deprecated patterns
|
||||
- Don't include overly complex examples
|
||||
- Don't miss the test examples
|
||||
- Don't show patterns without context
|
||||
- Don't recommend without evidence
|
||||
|
||||
Remember: You're providing templates and examples developers can adapt. Show them how it's been done successfully before.
|
||||
40
.claude/commands/commit.md
Normal file
@@ -0,0 +1,40 @@
# Commit Changes

You are tasked with creating git commits for the changes made during this session.

## Process:

1. **Think about what changed:**
   - Review the conversation history and understand what was accomplished
   - Run `git status` to see current changes
   - Run `git diff` to understand the modifications
   - Consider whether changes should be one commit or multiple logical commits

2. **Plan your commit(s):**
   - Identify which files belong together
   - Draft clear, descriptive commit messages
   - Use imperative mood in commit messages
   - Focus on why the changes were made, not just what

3. **Present your plan to the user:**
   - List the files you plan to add for each commit
   - Show the commit message(s) you'll use
   - Ask: "I plan to create [N] commit(s) with these changes. Shall I proceed?"

4. **Execute upon confirmation:**
   - Use `git add` with specific files (never use `-A` or `.`)
   - Create commits with your planned messages
   - Show the result with `git log --oneline -n [number]` (this flow is sketched below)

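Putting steps 1-4 together, a minimal sketch of the underlying commands; the file names and commit message are placeholders, and the actual grouping comes from your plan:

```bash
# Review state
git status
git diff

# Stage only the files that belong to this commit (never -A or .)
git add crates/feature/src/handlers.rs crates/feature/src/models.rs

# Commit with the planned message, written as if the user wrote it
git commit -m "Add webhook validation to feature handlers"

# Show the result
git log --oneline -n 1
```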
## Important:
- **NEVER add co-author information or Claude attribution**
- Commits should be authored solely by the user
- Do not include any "Generated with Claude" messages
- Do not add "Co-Authored-By" lines
- Write commit messages as if the user wrote them

## Remember:
- You have the full context of what was done in this session
- Group related changes together
- Keep commits focused and atomic when possible
- The user trusts your judgment - they asked you to commit
448
.claude/commands/create_plan.md
Normal file
@@ -0,0 +1,448 @@
|
||||
# Implementation Plan
|
||||
|
||||
You are tasked with creating detailed implementation plans through an interactive, iterative process. You should be skeptical, thorough, and work collaboratively with the user to produce high-quality technical specifications.
|
||||
|
||||
## Initial Response
|
||||
|
||||
When this command is invoked:
|
||||
|
||||
1. **Check if parameters were provided**:
|
||||
- If a file path or ticket reference was provided as a parameter, skip the default message
|
||||
- Immediately read any provided files FULLY
|
||||
- Begin the research process
|
||||
|
||||
2. **If no parameters provided**, respond with:
|
||||
|
||||
```
|
||||
I'll help you create a detailed implementation plan. Let me start by understanding what we're building.
|
||||
|
||||
Please provide:
|
||||
1. The task/ticket description (or reference to a ticket file)
|
||||
2. Any relevant context, constraints, or specific requirements
|
||||
3. Links to related research or previous implementations
|
||||
|
||||
I'll analyze this information and work with you to create a comprehensive plan.
|
||||
|
||||
Tip: You can also invoke this command with a ticket file directly: `/create_plan thoughts/allison/tickets/eng_1234.md`
|
||||
For deeper analysis, try: `/create_plan think deeply about thoughts/allison/tickets/eng_1234.md`
|
||||
```
|
||||
|
||||
Then wait for the user's input.
|
||||
|
||||
## Process Steps
|
||||
|
||||
### Step 1: Context Gathering & Initial Analysis
|
||||
|
||||
1. **Read all mentioned files immediately and FULLY**:
|
||||
- Ticket files (e.g., `thoughts/allison/tickets/eng_1234.md`)
|
||||
- Research documents
|
||||
- Related implementation plans
|
||||
- Any JSON/data files mentioned
|
||||
- **IMPORTANT**: Use the Read tool WITHOUT limit/offset parameters to read entire files
|
||||
- **CRITICAL**: DO NOT spawn sub-tasks before reading these files yourself in the main context
|
||||
- **NEVER** read files partially - if a file is mentioned, read it completely
|
||||
|
||||
2. **Spawn initial research tasks to gather context**:
|
||||
Before asking the user any questions, use specialized agents to research in parallel:
|
||||
- Use the **codebase-locator** agent to find all files related to the ticket/task
|
||||
- Use the **codebase-analyzer** agent to understand how the current implementation works
|
||||
|
||||
These agents will:
|
||||
- Find relevant source files, configs, and tests
|
||||
- Identify the specific directories to focus on (e.g., if WUI is mentioned, they'll focus on humanlayer-wui/)
|
||||
- Trace data flow and key functions
|
||||
- Return detailed explanations with file:line references
|
||||
|
||||
3. **Read all files identified by research tasks**:
|
||||
- After research tasks complete, read ALL files they identified as relevant
|
||||
- Read them FULLY into the main context
|
||||
- This ensures you have complete understanding before proceeding
|
||||
|
||||
4. **Analyze and verify understanding**:
|
||||
- Cross-reference the ticket requirements with actual code
|
||||
- Identify any discrepancies or misunderstandings
|
||||
- Note assumptions that need verification
|
||||
- Determine true scope based on codebase reality
|
||||
|
||||
5. **Present informed understanding and focused questions**:
|
||||
|
||||
```
|
||||
Based on the ticket and my research of the codebase, I understand we need to [accurate summary].
|
||||
|
||||
I've found that:
|
||||
- [Current implementation detail with file:line reference]
|
||||
- [Relevant pattern or constraint discovered]
|
||||
- [Potential complexity or edge case identified]
|
||||
|
||||
Questions that my research couldn't answer:
|
||||
- [Specific technical question that requires human judgment]
|
||||
- [Business logic clarification]
|
||||
- [Design preference that affects implementation]
|
||||
```
|
||||
|
||||
Only ask questions that you genuinely cannot answer through code investigation.
|
||||
|
||||
### Step 2: Research & Discovery
|
||||
|
||||
After getting initial clarifications:
|
||||
|
||||
1. **If the user corrects any misunderstanding**:
|
||||
- DO NOT just accept the correction
|
||||
- Spawn new research tasks to verify the correct information
|
||||
- Read the specific files/directories they mention
|
||||
- Only proceed once you've verified the facts yourself
|
||||
|
||||
2. **Create a research todo list** using TodoWrite to track exploration tasks
|
||||
|
||||
3. **Spawn parallel sub-tasks for comprehensive research**:
|
||||
- Create multiple Task agents to research different aspects concurrently
|
||||
- Use the right agent for each type of research:
|
||||
|
||||
**For deeper investigation:**
|
||||
- **codebase-locator** - To find more specific files (e.g., "find all files that handle [specific component]")
|
||||
- **codebase-analyzer** - To understand implementation details (e.g., "analyze how [system] works")
|
||||
- **codebase-pattern-finder** - To find similar features we can model after
|
||||
|
||||
**For historical context:**
|
||||
- **thoughts-locator** - To find any research, plans, or decisions about this area
|
||||
- **thoughts-analyzer** - To extract key insights from the most relevant documents
|
||||
|
||||
**For related tickets:**
|
||||
- **linear-searcher** - To find similar issues or past implementations
|
||||
|
||||
Each agent knows how to:
|
||||
- Find the right files and code patterns
|
||||
- Identify conventions and patterns to follow
|
||||
- Look for integration points and dependencies
|
||||
- Return specific file:line references
|
||||
- Find tests and examples
|
||||
|
||||
4. **Wait for ALL sub-tasks to complete** before proceeding
|
||||
|
||||
5. **Present findings and design options**:
|
||||
|
||||
```
|
||||
Based on my research, here's what I found:
|
||||
|
||||
**Current State:**
|
||||
- [Key discovery about existing code]
|
||||
- [Pattern or convention to follow]
|
||||
|
||||
**Design Options:**
|
||||
1. [Option A] - [pros/cons]
|
||||
2. [Option B] - [pros/cons]
|
||||
|
||||
**Open Questions:**
|
||||
- [Technical uncertainty]
|
||||
- [Design decision needed]
|
||||
|
||||
Which approach aligns best with your vision?
|
||||
```
|
||||
|
||||
### Step 3: Plan Structure Development
|
||||
|
||||
Once aligned on approach:
|
||||
|
||||
1. **Create initial plan outline**:
|
||||
|
||||
```
|
||||
Here's my proposed plan structure:
|
||||
|
||||
## Overview
|
||||
[1-2 sentence summary]
|
||||
|
||||
## Implementation Phases:
|
||||
1. [Phase name] - [what it accomplishes]
|
||||
2. [Phase name] - [what it accomplishes]
|
||||
3. [Phase name] - [what it accomplishes]
|
||||
|
||||
Does this phasing make sense? Should I adjust the order or granularity?
|
||||
```
|
||||
|
||||
2. **Get feedback on structure** before writing details
|
||||
|
||||
### Step 4: Detailed Plan Writing
|
||||
|
||||
After structure approval:
|
||||
|
||||
1. **Write the plan** to `thoughts/shared/plans/{descriptive_name}.md`
|
||||
2. **Use this template structure**:
|
||||
|
||||
````markdown
|
||||
# [Feature/Task Name] Implementation Plan
|
||||
|
||||
## Overview
|
||||
|
||||
[Brief description of what we're implementing and why]
|
||||
|
||||
## Current State Analysis
|
||||
|
||||
[What exists now, what's missing, key constraints discovered]
|
||||
|
||||
## Desired End State
|
||||
|
||||
[A Specification of the desired end state after this plan is complete, and how to verify it]
|
||||
|
||||
### Key Discoveries:
|
||||
|
||||
- [Important finding with file:line reference]
|
||||
- [Pattern to follow]
|
||||
- [Constraint to work within]
|
||||
|
||||
## What We're NOT Doing
|
||||
|
||||
[Explicitly list out-of-scope items to prevent scope creep]
|
||||
|
||||
## Implementation Approach
|
||||
|
||||
[High-level strategy and reasoning]
|
||||
|
||||
## Phase 1: [Descriptive Name]
|
||||
|
||||
### Overview
|
||||
|
||||
[What this phase accomplishes]
|
||||
|
||||
### Changes Required:
|
||||
|
||||
#### 1. [Component/File Group]
|
||||
|
||||
**File**: `path/to/file.ext`
|
||||
**Changes**: [Summary of changes]
|
||||
|
||||
```[language]
|
||||
// Specific code to add/modify
|
||||
```
|
||||
````
|
||||
|
||||
### Success Criteria:
|
||||
|
||||
#### Automated Verification:
|
||||
|
||||
- [ ] Migration applies cleanly: `make migrate`
|
||||
- [ ] Unit tests pass: `make test-component`
|
||||
- [ ] Type checking passes: `npm run typecheck`
|
||||
- [ ] Linting passes: `make lint`
|
||||
- [ ] Integration tests pass: `make test-integration`
|
||||
|
||||
#### Manual Verification:
|
||||
|
||||
- [ ] Feature works as expected when tested via UI
|
||||
- [ ] Performance is acceptable under load
|
||||
- [ ] Edge case handling verified manually
|
||||
- [ ] No regressions in related features
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: [Descriptive Name]
|
||||
|
||||
[Similar structure with both automated and manual success criteria...]
|
||||
|
||||
---
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests:
|
||||
|
||||
- [What to test]
|
||||
- [Key edge cases]
|
||||
|
||||
### Integration Tests:
|
||||
|
||||
- [End-to-end scenarios]
|
||||
|
||||
### Manual Testing Steps:
|
||||
|
||||
1. [Specific step to verify feature]
|
||||
2. [Another verification step]
|
||||
3. [Edge case to test manually]
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
[Any performance implications or optimizations needed]
|
||||
|
||||
## Migration Notes
|
||||
|
||||
[If applicable, how to handle existing data/systems]
|
||||
|
||||
## References
|
||||
|
||||
- Original ticket: `thoughts/allison/tickets/eng_XXXX.md`
|
||||
- Related research: `thoughts/shared/research/[relevant].md`
|
||||
- Similar implementation: `[file:line]`
|
||||
|
||||
```
|
||||
|
||||
### Step 5: Sync and Review
|
||||
|
||||
1. **Sync the thoughts directory**:
|
||||
- Run `humanlayer thoughts sync` to sync the newly created plan
|
||||
- This ensures the plan is properly indexed and available
|
||||
|
||||
2. **Present the draft plan location**:
|
||||
```
|
||||
|
||||
I've created the initial implementation plan at:
|
||||
`thoughts/shared/plans/[filename].md`
|
||||
|
||||
Please review it and let me know:
|
||||
|
||||
- Are the phases properly scoped?
|
||||
- Are the success criteria specific enough?
|
||||
- Any technical details that need adjustment?
|
||||
- Missing edge cases or considerations?
|
||||
|
||||
````
|
||||
|
||||
3. **Iterate based on feedback** - be ready to:
|
||||
- Add missing phases
|
||||
- Adjust technical approach
|
||||
- Clarify success criteria (both automated and manual)
|
||||
- Add/remove scope items
|
||||
- After making changes, run `humanlayer thoughts sync` again
|
||||
|
||||
4. **Continue refining** until the user is satisfied
|
||||
|
||||
## Important Guidelines
|
||||
|
||||
1. **Be Skeptical**:
|
||||
- Question vague requirements
|
||||
- Identify potential issues early
|
||||
- Ask "why" and "what about"
|
||||
- Don't assume - verify with code
|
||||
|
||||
2. **Be Interactive**:
|
||||
- Don't write the full plan in one shot
|
||||
- Get buy-in at each major step
|
||||
- Allow course corrections
|
||||
- Work collaboratively
|
||||
|
||||
3. **Be Thorough**:
|
||||
- Read all context files COMPLETELY before planning
|
||||
- Research actual code patterns using parallel sub-tasks
|
||||
- Include specific file paths and line numbers
|
||||
- Write measurable success criteria with clear automated vs manual distinction
|
||||
- automated steps should use `make` whenever possible - for example `make -C humanlayer-wui check` instead of `cd humanlayer-wui && bun run fmt`
|
||||
|
||||
4. **Be Practical**:
|
||||
- Focus on incremental, testable changes
|
||||
- Consider migration and rollback
|
||||
- Think about edge cases
|
||||
- Include "what we're NOT doing"
|
||||
|
||||
5. **Track Progress**:
|
||||
- Use TodoWrite to track planning tasks
|
||||
- Update todos as you complete research
|
||||
- Mark planning tasks complete when done
|
||||
|
||||
6. **No Open Questions in Final Plan**:
|
||||
- If you encounter open questions during planning, STOP
|
||||
- Research or ask for clarification immediately
|
||||
- Do NOT write the plan with unresolved questions
|
||||
- The implementation plan must be complete and actionable
|
||||
- Every decision must be made before finalizing the plan
|
||||
|
||||
## Success Criteria Guidelines
|
||||
|
||||
**Always separate success criteria into two categories:**
|
||||
|
||||
1. **Automated Verification** (can be run by execution agents):
|
||||
- Commands that can be run: `make test`, `npm run lint`, etc.
|
||||
- Specific files that should exist
|
||||
- Code compilation/type checking
|
||||
- Automated test suites
|
||||
|
||||
2. **Manual Verification** (requires human testing):
|
||||
- UI/UX functionality
|
||||
- Performance under real conditions
|
||||
- Edge cases that are hard to automate
|
||||
- User acceptance criteria
|
||||
|
||||
**Format example:**
|
||||
```markdown
|
||||
### Success Criteria:
|
||||
|
||||
#### Automated Verification:
|
||||
- [ ] Database migration runs successfully: `make migrate`
|
||||
- [ ] All unit tests pass: `go test ./...`
|
||||
- [ ] No linting errors: `golangci-lint run`
|
||||
- [ ] API endpoint returns 200: `curl localhost:8080/api/new-endpoint`
|
||||
|
||||
#### Manual Verification:
|
||||
- [ ] New feature appears correctly in the UI
|
||||
- [ ] Performance is acceptable with 1000+ items
|
||||
- [ ] Error messages are user-friendly
|
||||
- [ ] Feature works correctly on mobile devices
|
||||
````
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### For Database Changes:
|
||||
|
||||
- Start with schema/migration
|
||||
- Add store methods
|
||||
- Update business logic
|
||||
- Expose via API
|
||||
- Update clients
|
||||
|
||||
### For New Features:
|
||||
|
||||
- Research existing patterns first
|
||||
- Start with data model
|
||||
- Build backend logic
|
||||
- Add API endpoints
|
||||
- Implement UI last
|
||||
|
||||
### For Refactoring:
|
||||
|
||||
- Document current behavior
|
||||
- Plan incremental changes
|
||||
- Maintain backwards compatibility
|
||||
- Include migration strategy
|
||||
|
||||
## Sub-task Spawning Best Practices
|
||||
|
||||
When spawning research sub-tasks:
|
||||
|
||||
1. **Spawn multiple tasks in parallel** for efficiency
|
||||
2. **Each task should be focused** on a specific area
|
||||
3. **Provide detailed instructions** including:
|
||||
- Exactly what to search for
|
||||
- Which directories to focus on
|
||||
- What information to extract
|
||||
- Expected output format
|
||||
4. **Specify read-only tools** to use
|
||||
5. **Request specific file:line references** in responses
|
||||
6. **Wait for all tasks to complete** before synthesizing
|
||||
7. **Verify sub-task results**:
|
||||
- If a sub-task returns unexpected results, spawn follow-up tasks
|
||||
- Cross-check findings against the actual codebase
|
||||
- Don't accept results that seem incorrect
|
||||
|
||||
Example of spawning multiple tasks:
|
||||
|
||||
```python
|
||||
# Spawn these tasks concurrently:
|
||||
tasks = [
|
||||
Task("Research database schema", db_research_prompt),
|
||||
Task("Find API patterns", api_research_prompt),
|
||||
Task("Investigate UI components", ui_research_prompt),
|
||||
Task("Check test patterns", test_research_prompt)
|
||||
]
|
||||
```
|
||||
|
||||
## Example Interaction Flow
|
||||
|
||||
```
|
||||
User: /implementation_plan
|
||||
Assistant: I'll help you create a detailed implementation plan...
|
||||
|
||||
User: We need to add parent-child tracking for Claude sub-tasks. See thoughts/allison/tickets/eng_1478.md
|
||||
Assistant: Let me read that ticket file completely first...
|
||||
|
||||
[Reads file fully]
|
||||
|
||||
Based on the ticket, I understand we need to track parent-child relationships for Claude sub-task events in the hld daemon. Before I start planning, I have some questions...
|
||||
|
||||
[Interactive process continues...]
|
||||
```
|
||||
37
.claude/commands/create_worktree.md
Normal file
@@ -0,0 +1,37 @@

2. set up worktree for implementation:
2a. read `hack/create_worktree.sh` and create a new worktree with the Linear branch name: `./hack/create_worktree.sh ENG-XXXX BRANCH_NAME`

3. determine required data:

branch name
path to plan file (use relative path only)
launch prompt
command to run

**IMPORTANT PATH USAGE:**
- The thoughts/ directory is synced between the main repo and worktrees
- Always use ONLY the relative path starting with `thoughts/shared/...` without any directory prefix
- Example: `thoughts/shared/plans/fix-mcp-keepalive-proper.md` (not the full absolute path)
- This works because thoughts are synced and accessible from the worktree

3a. confirm with the user by sending a message to the Human

```
based on the input, I plan to create a worktree with the following details:

worktree path: ~/wt/humanlayer/ENG-XXXX
branch name: BRANCH_NAME
path to plan file: $FILEPATH
launch prompt:

/implement_plan at $FILEPATH and when you are done implementing and all tests pass, read ./claude/commands/commit.md and create a commit, then read ./claude/commands/describe_pr.md and create a PR, then add a comment to the Linear ticket with the PR link

command to run:

humanlayer launch --model opus -w ~/wt/humanlayer/ENG-XXXX "/implement_plan at $FILEPATH and when you are done implementing and all tests pass, read ./claude/commands/commit.md and create a commit, then read ./claude/commands/describe_pr.md and create a PR, then add a comment to the Linear ticket with the PR link"
```

incorporate any user feedback then:

4. launch implementation session: `humanlayer launch --model opus -w ~/wt/humanlayer/ENG-XXXX "/implement_plan at $FILEPATH and when you are done implementing and all tests pass, read ./claude/commands/commit.md and create a commit, then read ./claude/commands/describe_pr.md and create a PR, then add a comment to the Linear ticket with the PR link"`
196
.claude/commands/debug.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# Debug
|
||||
|
||||
You are tasked with helping debug issues during manual testing or implementation. This command allows you to investigate problems by examining logs, database state, and git history without editing files. Think of this as a way to bootstrap a debugging session without using the primary window's context.
|
||||
|
||||
## Initial Response
|
||||
|
||||
When invoked WITH a plan/ticket file:
|
||||
```
|
||||
I'll help debug issues with [file name]. Let me understand the current state.
|
||||
|
||||
What specific problem are you encountering?
|
||||
- What were you trying to test/implement?
|
||||
- What went wrong?
|
||||
- Any error messages?
|
||||
|
||||
I'll investigate the logs, database, and git state to help figure out what's happening.
|
||||
```
|
||||
|
||||
When invoked WITHOUT parameters:
|
||||
```
|
||||
I'll help debug your current issue.
|
||||
|
||||
Please describe what's going wrong:
|
||||
- What are you working on?
|
||||
- What specific problem occurred?
|
||||
- When did it last work?
|
||||
|
||||
I can investigate logs, database state, and recent changes to help identify the issue.
|
||||
```
|
||||
|
||||
## Environment Information

You have access to these key locations and tools:

**Logs** (automatically created by `make daemon` and `make wui`):
- MCP logs: `~/.humanlayer/logs/mcp-claude-approvals-*.log`
- Combined WUI/Daemon logs: `~/.humanlayer/logs/wui-${BRANCH_NAME}/codelayer.log`
- First line shows: `[timestamp] starting [service] in [directory]`

**Database**:
- Location: `~/.humanlayer/daemon-{BRANCH_NAME}.db`
- SQLite database with sessions, events, approvals, etc.
- Can query directly with `sqlite3`

**Git State**:
- Check current branch, recent commits, uncommitted changes
- Similar to how `commit` and `describe_pr` commands work

**Service Status**:
- Check if daemon is running: `ps aux | grep hld`
- Check if WUI is running: `ps aux | grep wui`
- Socket exists: `~/.humanlayer/daemon.sock`
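A first pass over these locations, sketched as shell commands; the branch directory `wui-my-branch` is a placeholder for the session being debugged:

```bash
# Most recent combined WUI/Daemon log for this branch
tail -n 100 ~/.humanlayer/logs/wui-my-branch/codelayer.log

# Errors and warnings around the failure
grep -inE 'error|warn' ~/.humanlayer/logs/wui-my-branch/codelayer.log | tail -n 50

# Confirm the daemon is up and its socket exists
ps aux | grep hld
ls -l ~/.humanlayer/daemon.sock
```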
## Process Steps
|
||||
|
||||
### Step 1: Understand the Problem
|
||||
|
||||
After the user describes the issue:
|
||||
|
||||
1. **Read any provided context** (plan or ticket file):
|
||||
- Understand what they're implementing/testing
|
||||
- Note which phase or step they're on
|
||||
- Identify expected vs actual behavior
|
||||
|
||||
2. **Quick state check**:
|
||||
- Current git branch and recent commits
|
||||
- Any uncommitted changes
|
||||
- When the issue started occurring
|
||||
|
||||
### Step 2: Investigate the Issue
|
||||
|
||||
Spawn parallel Task agents for efficient investigation:
|
||||
|
||||
```
|
||||
Task 1 - Check Recent Logs:
|
||||
Find and analyze the most recent logs for errors:
|
||||
1. Find latest daemon log: ls -t ~/.humanlayer/logs/daemon-*.log | head -1
|
||||
2. Find latest WUI log: ls -t ~/.humanlayer/logs/wui-*.log | head -1
|
||||
3. Search for errors, warnings, or issues around the problem timeframe
|
||||
4. Note the working directory (first line of log)
|
||||
5. Look for stack traces or repeated errors
|
||||
Return: Key errors/warnings with timestamps
|
||||
```
|
||||
|
||||
```
|
||||
Task 2 - Database State:
|
||||
Check the current database state:
|
||||
1. Connect to database: sqlite3 ~/.humanlayer/daemon.db
|
||||
2. Check schema: .tables and .schema for relevant tables
|
||||
3. Query recent data:
|
||||
- SELECT * FROM sessions ORDER BY created_at DESC LIMIT 5;
|
||||
- SELECT * FROM conversation_events WHERE created_at > datetime('now', '-1 hour');
|
||||
- Other queries based on the issue
|
||||
4. Look for stuck states or anomalies
|
||||
Return: Relevant database findings
|
||||
```
|
||||
|
||||
```
|
||||
Task 3 - Git and File State:
|
||||
Understand what changed recently:
|
||||
1. Check git status and current branch
|
||||
2. Look at recent commits: git log --oneline -10
|
||||
3. Check uncommitted changes: git diff
|
||||
4. Verify expected files exist
|
||||
5. Look for any file permission issues
|
||||
Return: Git state and any file issues
|
||||
```
|
||||
|
||||
### Step 3: Present Findings
|
||||
|
||||
Based on the investigation, present a focused debug report:
|
||||
|
||||
```markdown
|
||||
## Debug Report
|
||||
|
||||
### What's Wrong
|
||||
[Clear statement of the issue based on evidence]
|
||||
|
||||
### Evidence Found
|
||||
|
||||
**From Logs** (`~/.humanlayer/logs/`):
|
||||
- [Error/warning with timestamp]
|
||||
- [Pattern or repeated issue]
|
||||
|
||||
**From Database**:
|
||||
```sql
|
||||
-- Relevant query and result
|
||||
[Finding from database]
|
||||
```
|
||||
|
||||
**From Git/Files**:
|
||||
- [Recent changes that might be related]
|
||||
- [File state issues]
|
||||
|
||||
### Root Cause
|
||||
[Most likely explanation based on evidence]
|
||||
|
||||
### Next Steps
|
||||
|
||||
1. **Try This First**:
|
||||
```bash
|
||||
[Specific command or action]
|
||||
```
|
||||
|
||||
2. **If That Doesn't Work**:
|
||||
- Restart services: `make daemon` and `make wui`
|
||||
- Check browser console for WUI errors
|
||||
- Run with debug: `HUMANLAYER_DEBUG=true make daemon`
|
||||
|
||||
### Can't Access?
|
||||
Some issues might be outside my reach:
|
||||
- Browser console errors (F12 in browser)
|
||||
- MCP server internal state
|
||||
- System-level issues
|
||||
|
||||
Would you like me to investigate something specific further?
|
||||
```
|
||||
|
||||
## Important Notes
|
||||
|
||||
- **Focus on manual testing scenarios** - This is for debugging during implementation
|
||||
- **Always require problem description** - Can't debug without knowing what's wrong
|
||||
- **Read files completely** - No limit/offset when reading context
|
||||
- **Think like `commit` or `describe_pr`** - Understand git state and changes
|
||||
- **Guide back to user** - Some issues (browser console, MCP internals) are outside reach
|
||||
- **No file editing** - Pure investigation only
|
||||
|
||||
## Quick Reference
|
||||
|
||||
**Find Latest Logs**:
|
||||
```bash
|
||||
ls -t ~/.humanlayer/logs/daemon-*.log | head -1
|
||||
ls -t ~/.humanlayer/logs/wui-*.log | head -1
|
||||
```
|
||||
|
||||
**Database Queries**:
|
||||
```bash
|
||||
sqlite3 ~/.humanlayer/daemon.db ".tables"
|
||||
sqlite3 ~/.humanlayer/daemon.db ".schema sessions"
|
||||
sqlite3 ~/.humanlayer/daemon.db "SELECT * FROM sessions ORDER BY created_at DESC LIMIT 5;"
|
||||
```
|
||||
|
||||
**Service Check**:
|
||||
```bash
|
||||
ps aux | grep hld # Is daemon running?
|
||||
ps aux | grep wui # Is WUI running?
|
||||
```
|
||||
|
||||
**Git State**:
|
||||
```bash
|
||||
git status
|
||||
git log --oneline -10
|
||||
git diff
|
||||
```
|
||||
|
||||
Remember: This command helps you investigate without burning the primary window's context. Perfect for when you hit an issue during manual testing and need to dig into logs, database, or git state.
|
||||
71
.claude/commands/describe_pr.md
Normal file
@@ -0,0 +1,71 @@
# Generate PR Description

You are tasked with generating a comprehensive pull request description following the repository's standard template.

## Steps to follow:

1. **Read the PR description template:**
   - First, check if `thoughts/shared/pr_description.md` exists
   - If it doesn't exist, inform the user that their `humanlayer thoughts` setup is incomplete and they need to create a PR description template at `thoughts/shared/pr_description.md`
   - Read the template carefully to understand all sections and requirements

2. **Identify the PR to describe:**
   - Check if the current branch has an associated PR: `gh pr view --json url,number,title,state 2>/dev/null`
   - If no PR exists for the current branch, or if on main/master, list open PRs: `gh pr list --limit 10 --json number,title,headRefName,author`
   - Ask the user which PR they want to describe

3. **Check for existing description:**
   - Check if `thoughts/shared/prs/{number}_description.md` already exists
   - If it exists, read it and inform the user you'll be updating it
   - Consider what has changed since the last description was written

4. **Gather comprehensive PR information:** (the `gh` calls here are consolidated in the sketch after these steps)
   - Get the full PR diff: `gh pr diff {number}`
   - If you get an error about no default remote repository, instruct the user to run `gh repo set-default` and select the appropriate repository
   - Get commit history: `gh pr view {number} --json commits`
   - Review the base branch: `gh pr view {number} --json baseRefName`
   - Get PR metadata: `gh pr view {number} --json url,title,number,state`

5. **Analyze the changes thoroughly:** (ultrathink about the code changes, their architectural implications, and potential impacts)
   - Read through the entire diff carefully
   - For context, read any files that are referenced but not shown in the diff
   - Understand the purpose and impact of each change
   - Identify user-facing changes vs internal implementation details
   - Look for breaking changes or migration requirements

6. **Handle verification requirements:**
   - Look for any checklist items in the "How to verify it" section of the template
   - For each verification step:
     - If it's a command you can run (like `make check test`, `npm test`, etc.), run it
     - If it passes, mark the checkbox as checked: `- [x]`
     - If it fails, keep it unchecked and note what failed: `- [ ]` with explanation
     - If it requires manual testing (UI interactions, external services), leave unchecked and note for user
   - Document any verification steps you couldn't complete

7. **Generate the description:**
   - Fill out each section from the template thoroughly:
     - Answer each question/section based on your analysis
     - Be specific about problems solved and changes made
     - Focus on user impact where relevant
     - Include technical details in appropriate sections
   - Write a concise changelog entry
   - Ensure all checklist items are addressed (checked or explained)

8. **Save and sync the description:**
   - Write the completed description to `thoughts/shared/prs/{number}_description.md`
   - Run `humanlayer thoughts sync` to sync the thoughts directory
   - Show the user the generated description

9. **Update the PR:**
   - Update the PR description directly: `gh pr edit {number} --body-file thoughts/shared/prs/{number}_description.md`
   - Confirm the update was successful
   - If any verification steps remain unchecked, remind the user to complete them before merging

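Steps 2, 4, 8, and 9 reduce to a handful of `gh` and `humanlayer` invocations. A condensed sketch, using 1234 as a stand-in PR number:

```bash
# Step 2: find the PR for the current branch, or list candidates
gh pr view --json url,number,title,state 2>/dev/null
gh pr list --limit 10 --json number,title,headRefName,author

# Step 4: gather the diff, commits, and metadata
gh pr diff 1234
gh pr view 1234 --json commits,baseRefName,url,title,number,state

# Steps 8-9: sync the saved description and push it to the PR
humanlayer thoughts sync
gh pr edit 1234 --body-file thoughts/shared/prs/1234_description.md
```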
## Important notes:
- This command works across different repositories - always read the local template
- Be thorough but concise - descriptions should be scannable
- Focus on the "why" as much as the "what"
- Include any breaking changes or migration notes prominently
- If the PR touches multiple components, organize the description accordingly
- Always attempt to run verification commands when possible
- Clearly communicate which verification steps need manual testing
65
.claude/commands/implement_plan.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Implement Plan
|
||||
|
||||
You are tasked with implementing an approved technical plan from `thoughts/shared/plans/`. These plans contain phases with specific changes and success criteria.
|
||||
|
||||
## Getting Started
|
||||
|
||||
When given a plan path:
|
||||
- Read the plan completely and check for any existing checkmarks (- [x])
|
||||
- Read the original ticket and all files mentioned in the plan
|
||||
- **Read files fully** - never use limit/offset parameters, you need complete context
|
||||
- Think deeply about how the pieces fit together
|
||||
- Create a todo list to track your progress
|
||||
- Start implementing if you understand what needs to be done
|
||||
|
||||
If no plan path provided, ask for one.
|
||||
|
||||
## Implementation Philosophy
|
||||
|
||||
Plans are carefully designed, but reality can be messy. Your job is to:
|
||||
- Follow the plan's intent while adapting to what you find
|
||||
- Implement each phase fully before moving to the next
|
||||
- Verify your work makes sense in the broader codebase context
|
||||
- Update checkboxes in the plan as you complete sections
|
||||
|
||||
When things don't match the plan exactly, think about why and communicate clearly. The plan is your guide, but your judgment matters too.
|
||||
|
||||
If you encounter a mismatch:
|
||||
- STOP and think deeply about why the plan can't be followed
|
||||
- Present the issue clearly:
|
||||
```
|
||||
Issue in Phase [N]:
|
||||
Expected: [what the plan says]
|
||||
Found: [actual situation]
|
||||
Why this matters: [explanation]
|
||||
|
||||
How should I proceed?
|
||||
```
|
||||
|
||||
## Verification Approach
|
||||
|
||||
After implementing a phase:
|
||||
- Run the success criteria checks (usually `cargo test -p [crate_name]` covers everything)
|
||||
- Fix any issues before proceeding
|
||||
- Update your progress in both the plan and your todos
|
||||
- Check off completed items in the plan file itself using Edit
|
||||
|
||||
Don't let verification interrupt your flow - batch it at natural stopping points.
|
||||
|
||||
## If You Get Stuck
|
||||
|
||||
When something isn't working as expected:
|
||||
- First, make sure you've read and understood all the relevant code
|
||||
- Consider if the codebase has evolved since the plan was written
|
||||
- Present the mismatch clearly and ask for guidance
|
||||
|
||||
Use sub-tasks sparingly - mainly for targeted debugging or exploring unfamiliar territory.
|
||||
|
||||
## Resuming Work
|
||||
|
||||
If the plan has existing checkmarks:
|
||||
- Trust that completed work is done
|
||||
- Pick up from the first unchecked item
|
||||
- Verify previous work only if something seems off
|
||||
|
||||
Remember: You're implementing a solution, not just checking boxes. Keep the end goal in mind and maintain forward momentum.
|
||||
44
.claude/commands/local_review.md
Normal file
@@ -0,0 +1,44 @@
# Local Review

You are tasked with setting up a local review environment for a colleague's branch. This involves creating a worktree, setting up dependencies, and launching a new Claude Code session.

## Process

When invoked with a parameter like `gh_username:branchName`:

1. **Parse the input**:
   - Extract GitHub username and branch name from the format `username:branchname`
   - If no parameter provided, ask for it in the format: `gh_username:branchName`

2. **Extract ticket information**:
   - Look for ticket numbers in the branch name (e.g., `eng-1696`, `ENG-1696`)
   - Use this to create a short worktree directory name
   - If no ticket found, use a sanitized version of the branch name

3. **Set up the remote and worktree**:
   - Check if the remote already exists using `git remote -v`
   - If not, add it: `git remote add USERNAME git@github.com:USERNAME/humanlayer`
   - Fetch from the remote: `git fetch USERNAME`
   - Create worktree: `git worktree add -b BRANCHNAME ~/wt/humanlayer/SHORT_NAME USERNAME/BRANCHNAME`

4. **Configure the worktree** (steps 3 and 4 are consolidated in the sketch after this list):
   - Copy Claude settings: `cp .claude/settings.local.json WORKTREE/.claude/`
   - Run setup: `make -C WORKTREE setup`
   - Initialize thoughts: `cd WORKTREE && npx humanlayer thoughts init --directory humanlayer`

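A condensed sketch of steps 3 and 4, keeping the same placeholders (USERNAME, BRANCHNAME, SHORT_NAME) used above:

```bash
# Step 3: remote + worktree
git remote -v | grep -q "^USERNAME" || git remote add USERNAME git@github.com:USERNAME/humanlayer
git fetch USERNAME
git worktree add -b BRANCHNAME ~/wt/humanlayer/SHORT_NAME USERNAME/BRANCHNAME

# Step 4: configure the worktree (WORKTREE = ~/wt/humanlayer/SHORT_NAME)
cp .claude/settings.local.json ~/wt/humanlayer/SHORT_NAME/.claude/
make -C ~/wt/humanlayer/SHORT_NAME setup
cd ~/wt/humanlayer/SHORT_NAME && npx humanlayer thoughts init --directory humanlayer
```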
## Error Handling

- If worktree already exists, inform the user they need to remove it first
- If remote fetch fails, check if the username/repo exists
- If setup fails, provide the error but continue with the launch

## Example Usage

```
/local_review samdickson22:sam/eng-1696-hotkey-for-yolo-mode
```

This will:
- Add 'samdickson22' as a remote
- Create worktree at `~/wt/humanlayer/eng-1696`
- Set up the environment
28
.claude/commands/ralph_impl.md
Normal file
@@ -0,0 +1,28 @@
## PART I - IF A TICKET IS MENTIONED

0c. use `linear` cli to fetch the selected item into thoughts with the ticket number - ./thoughts/shared/tickets/ENG-xxxx.md
0d. read the ticket and all comments to understand the implementation plan and any concerns

## PART I - IF NO TICKET IS MENTIONED

0. read .claude/commands/linear.md
0a. fetch the top 10 priority items from linear in status "ready for dev" using the MCP tools, noting all items in the `links` section
0b. select the highest priority SMALL or XS issue from the list (if no SMALL or XS issues exist, EXIT IMMEDIATELY and inform the user)
0c. use `linear` cli to fetch the selected item into thoughts with the ticket number - ./thoughts/shared/tickets/ENG-xxxx.md
0d. read the ticket and all comments to understand the implementation plan and any concerns

## PART II - NEXT STEPS

think deeply

1. move the item to "in dev" using the MCP tools
1a. identify the linked implementation plan document from the `links` section
1b. if no plan exists, move the ticket back to "ready for spec" and EXIT with an explanation

think deeply about the implementation

2. set up worktree for implementation:
2a. read `hack/create_worktree.sh` and create a new worktree with the Linear branch name: `./hack/create_worktree.sh ENG-XXXX BRANCH_NAME`
2b. launch implementation session: `npx humanlayer launch --model opus -w ~/wt/humanlayer/ENG-XXXX "/implement_plan and when you are done implementing and all tests pass, read ./claude/commands/commit.md and create a commit, then read ./claude/commands/describe_pr.md and create a PR, then add a comment to the Linear ticket with the PR link"`

think deeply, use TodoWrite to track your tasks. When fetching from linear, get the top 10 items by priority but only work on ONE item - specifically the highest priority SMALL or XS sized issue.
30
.claude/commands/ralph_plan.md
Normal file
@@ -0,0 +1,30 @@
## PART I - IF A TICKET IS MENTIONED

0c. use `linear` cli to fetch the selected item into thoughts with the ticket number - ./thoughts/shared/tickets/ENG-xxxx.md
0d. read the ticket and all comments to learn about past implementations and research, and any questions or concerns about them

### PART I - IF NO TICKET IS MENTIONED

0. read .claude/commands/linear.md
0a. fetch the top 10 priority items from linear in status "ready for spec" using the MCP tools, noting all items in the `links` section
0b. select the highest priority SMALL or XS issue from the list (if no SMALL or XS issues exist, EXIT IMMEDIATELY and inform the user)
0c. use `linear` cli to fetch the selected item into thoughts with the ticket number - ./thoughts/shared/tickets/ENG-xxxx.md
0d. read the ticket and all comments to learn about past implementations and research, and any questions or concerns about them

### PART II - NEXT STEPS

think deeply

1. move the item to "plan in progress" using the MCP tools
1a. read ./claude/commands/create_plan.md
1b. determine if the item has a linked implementation plan document based on the `links` section
1d. if the plan exists, you're done, respond with a link to the ticket
1e. if the research is insufficient or has unanswered questions, create a new plan document following the instructions in ./claude/commands/create_plan.md

think deeply

2. when the plan is complete, `humanlayer thoughts sync` and attach the doc to the ticket using the MCP tools and create a terse comment with a link to it (re-read .claude/commands/linear.md if needed)
2a. move the item to "plan in review" using the MCP tools

think deeply, use TodoWrite to track your tasks. When fetching from linear, get the top 10 items by priority but only work on ONE item - specifically the highest priority SMALL or XS sized issue.
46
.claude/commands/ralph_research.md
Normal file
@@ -0,0 +1,46 @@
## PART I - IF A LINEAR TICKET IS MENTIONED

0c. use `linear` cli to fetch the selected item into thoughts with the ticket number - ./thoughts/shared/tickets/ENG-xxxx.md
0d. read the ticket and all comments to understand what research is needed and any previous attempts

## PART I - IF NO TICKET IS MENTIONED

0. read .claude/commands/linear.md
0a. fetch the top 10 priority items from linear in status "research needed" using the MCP tools, noting all items in the `links` section
0b. select the highest priority SMALL or XS issue from the list (if no SMALL or XS issues exist, EXIT IMMEDIATELY and inform the user)
0c. use `linear` cli to fetch the selected item into thoughts with the ticket number - ./thoughts/shared/tickets/ENG-xxxx.md
0d. read the ticket and all comments to understand what research is needed and any previous attempts

## PART II - NEXT STEPS

think deeply

1. move the item to "research in progress" using the MCP tools
1a. read any linked documents in the `links` section to understand context
1b. if insufficient information to conduct research, add a comment asking for clarification and move back to "research needed"

think deeply about the research needs

2. conduct the research:
2a. read .claude/commands/research_codebase.md for guidance on effective codebase research
2b. if the linear comments suggest web research is needed, use WebSearch to research external solutions, APIs, or best practices
2c. search the codebase for relevant implementations and patterns
2d. examine existing similar features or related code
2e. identify technical constraints and opportunities
2f. Be unbiased - don't think too much about an ideal implementation plan, just document all related files and how the systems work today
2g. document findings in a new thoughts document: `thoughts/shared/research/ENG-XXXX_research.md`

think deeply about the findings

3. synthesize research into actionable insights:
3a. summarize key findings and technical decisions
3b. identify potential implementation approaches
3c. note any risks or concerns discovered
3d. run `humanlayer thoughts sync` to save the research

4. update the ticket:
4a. attach the research document to the ticket using the MCP tools with proper link formatting
4b. add a comment summarizing the research outcomes
4c. move the item to "research in review" using the MCP tools

think deeply, use TodoWrite to track your tasks. When fetching from linear, get the top 10 items by priority but only work on ONE item - specifically the highest priority issue.
172
.claude/commands/research_codebase.md
Normal file
@@ -0,0 +1,172 @@
# Research Codebase

You are tasked with conducting comprehensive research across the codebase to answer user questions by spawning parallel sub-agents and synthesizing their findings.

## Initial Setup:

When this command is invoked, respond with:

```
I'm ready to research the codebase. Please provide your research question or area of interest, and I'll analyze it thoroughly by exploring relevant components and connections.
```

Then wait for the user's research query.

## Steps to follow after receiving the research query:

1. **Read any directly mentioned files first:**
   - If the user mentions specific files (crates, docs, JSON), read them FULLY first
   - **IMPORTANT**: Use the Read tool WITHOUT limit/offset parameters to read entire files
   - **CRITICAL**: Read these files yourself in the main context before spawning any sub-tasks
   - This ensures you have full context before decomposing the research

2. **Analyze and decompose the research question:**
   - Break down the user's query into composable research areas
   - Take time to ultrathink about the underlying patterns, connections, and architectural implications the user might be seeking
   - Identify specific components, patterns, or concepts to investigate
   - Create a research plan using TodoWrite to track all subtasks
   - Consider which directories, files, or architectural patterns are relevant

3. **Spawn parallel sub-agent tasks for comprehensive research:**
   - Create multiple Task agents to research different aspects concurrently
   - We now have specialized agents that know how to do specific research tasks:

   **For codebase research:**
   - Use the **codebase-locator** agent to find WHERE files and components live
   - Use the **codebase-analyzer** agent to understand HOW specific code works

   The key is to use these agents intelligently:
   - Start with locator agents to find what exists
   - Then use analyzer agents on the most promising findings
   - Run multiple agents in parallel when they're searching for different things
   - Each agent knows its job - just tell it what you're looking for
   - Don't write detailed prompts about HOW to search - the agents already know

4. **Wait for all sub-agents to complete and synthesize findings:**
   - IMPORTANT: Wait for ALL sub-agent tasks to complete before proceeding
   - Compile all sub-agent results (both codebase and thoughts findings)
   - Prioritize live codebase findings as primary source of truth
   - Use thoughts/ findings as supplementary historical context
   - Connect findings across different components
   - Include specific file paths and line numbers for reference
   - Verify all thoughts/ paths are correct (e.g., thoughts/allison/ not thoughts/shared/ for personal files)
   - Highlight patterns, connections, and architectural decisions
   - Answer the user's specific questions with concrete evidence

5. **Gather metadata for the research document:**
   - Run the `zed/script/spec_metadata.sh` script to generate all relevant metadata
   - Filename: `thoughts/shared/research/YYYY-MM-DD_HH-MM-SS_topic.md`

6. **Generate research document:**
   - Use the metadata gathered in step 4
   - Structure the document with YAML frontmatter followed by content:

```markdown
---
date: [Current date and time with timezone in ISO format]
researcher: [Researcher name from thoughts status]
git_commit: [Current commit hash]
branch: [Current branch name]
repository: [Repository name]
topic: "[User's Question/Topic]"
tags: [research, codebase, relevant-component-names]
status: complete
last_updated: [Current date in YYYY-MM-DD format]
last_updated_by: [Researcher name]
---

# Research: [User's Question/Topic]

**Date**: [Current date and time with timezone from step 4]
**Researcher**: [Researcher name from thoughts status]
**Git Commit**: [Current commit hash from step 4]
**Branch**: [Current branch name from step 4]
**Repository**: [Repository name]

## Research Question

[Original user query]

## Summary

[High-level findings answering the user's question]

## Detailed Findings

### [Component/Area 1]

- Finding with reference ([file.ext:line](link))
- Connection to other components
- Implementation details

### [Component/Area 2]

...

## Code References

- `path/to/file.py:123` - Description of what's there
- `another/file.ts:45-67` - Description of the code block

## Architecture Insights

[Patterns, conventions, and design decisions discovered]

## Historical Context (from thoughts/)

[Relevant insights from thoughts/ directory with references]

- `thoughts/shared/something.md` - Historical decision about X
- `thoughts/local/notes.md` - Past exploration of Y
  Note: Paths exclude "searchable/" even if found there

## Related Research

[Links to other research documents in thoughts/shared/research/]

## Open Questions

[Any areas that need further investigation]
```

7. **Add GitHub permalinks (if applicable):**
   - Check if on main branch or if commit is pushed: `git branch --show-current` and `git status`
   - If on main/master or pushed, generate GitHub permalinks:
     - Get repo info: `gh repo view --json owner,name`
     - Create permalinks: `https://github.com/{owner}/{repo}/blob/{commit}/{file}#L{line}`
   - Replace local file references with permalinks in the document

8. **Handle follow-up questions:**
   - If the user has follow-up questions, append to the same research document
   - Update the frontmatter fields `last_updated` and `last_updated_by` to reflect the update
   - Add `last_updated_note: "Added follow-up research for [brief description]"` to frontmatter
   - Add a new section: `## Follow-up Research [timestamp]`
   - Spawn new sub-agents as needed for additional investigation
   - Continue updating the document and syncing

## Important notes:

- Always use parallel Task agents to maximize efficiency and minimize context usage
- Always run fresh codebase research - never rely solely on existing research documents
- The thoughts/ directory provides historical context to supplement live findings
- Focus on finding concrete file paths and line numbers for developer reference
- Research documents should be self-contained with all necessary context
- Each sub-agent prompt should be specific and focused on read-only operations
- Consider cross-component connections and architectural patterns
- Include temporal context (when the research was conducted)
- Link to GitHub when possible for permanent references
- Keep the main agent focused on synthesis, not deep file reading
- Encourage sub-agents to find examples and usage patterns, not just definitions
- Explore all of thoughts/ directory, not just research subdirectory
- **File reading**: Always read mentioned files FULLY (no limit/offset) before spawning sub-tasks
- **Critical ordering**: Follow the numbered steps exactly
  - ALWAYS read mentioned files first before spawning sub-tasks (step 1)
  - ALWAYS wait for all sub-agents to complete before synthesizing (step 4)
  - ALWAYS gather metadata before writing the document (step 5 before step 6)
  - NEVER write the research document with placeholder values
- **Frontmatter consistency**:
  - Always include frontmatter at the beginning of research documents
  - Keep frontmatter fields consistent across all research documents
  - Update frontmatter when adding follow-up research
  - Use snake_case for multi-word field names (e.g., `last_updated`, `git_commit`)
  - Tags should be relevant to the research topic and components studied
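As a rough illustration of the permalink format step 7 of this command asks for, the pieces can be assembled like this; a sketch only, assuming the `gh` CLI is installed (the file path and line number are hypothetical placeholders, not values from any real research document):

```bash
# Sketch: building a GitHub permalink of the form described in step 7.
commit=$(git rev-parse HEAD)
repo=$(gh repo view --json owner,name --jq '.owner.login + "/" + .name')
file="crates/gpui/src/app.rs"   # hypothetical file reference
line=42                         # hypothetical line number
echo "https://github.com/${repo}/blob/${commit}/${file}#L${line}"
```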
162
.claude/commands/validate_plan.md
Normal file
@@ -0,0 +1,162 @@
# Validate Plan

You are tasked with validating that an implementation plan was correctly executed, verifying all success criteria and identifying any deviations or issues.

## Initial Setup

When invoked:
1. **Determine context** - Are you in an existing conversation or starting fresh?
   - If existing: Review what was implemented in this session
   - If fresh: Need to discover what was done through git and codebase analysis

2. **Locate the plan**:
   - If plan path provided, use it
   - Otherwise, search recent commits for plan references or ask user

3. **Gather implementation evidence**:
   ```bash
   # Check recent commits
   git log --oneline -n 20
   git diff HEAD~N..HEAD  # Where N covers implementation commits

   # Run comprehensive checks
   cd $(git rev-parse --show-toplevel) && make check test
   ```

## Validation Process

### Step 1: Context Discovery

If starting fresh or need more context:

1. **Read the implementation plan** completely
2. **Identify what should have changed**:
   - List all files that should be modified
   - Note all success criteria (automated and manual)
   - Identify key functionality to verify

3. **Spawn parallel research tasks** to discover implementation:
   ```
   Task 1 - Verify database changes:
   Research if migration [N] was added and schema changes match plan.
   Check: migration files, schema version, table structure
   Return: What was implemented vs what plan specified

   Task 2 - Verify code changes:
   Find all modified files related to [feature].
   Compare actual changes to plan specifications.
   Return: File-by-file comparison of planned vs actual

   Task 3 - Verify test coverage:
   Check if tests were added/modified as specified.
   Run test commands and capture results.
   Return: Test status and any missing coverage
   ```

### Step 2: Systematic Validation

For each phase in the plan:

1. **Check completion status**:
   - Look for checkmarks in the plan (- [x])
   - Verify the actual code matches claimed completion

2. **Run automated verification**:
   - Execute each command from "Automated Verification"
   - Document pass/fail status
   - If failures, investigate root cause

3. **Assess manual criteria**:
   - List what needs manual testing
   - Provide clear steps for user verification

4. **Think deeply about edge cases**:
   - Were error conditions handled?
   - Are there missing validations?
   - Could the implementation break existing functionality?

### Step 3: Generate Validation Report

Create comprehensive validation summary:

```markdown
## Validation Report: [Plan Name]

### Implementation Status
✓ Phase 1: [Name] - Fully implemented
✓ Phase 2: [Name] - Fully implemented
⚠️ Phase 3: [Name] - Partially implemented (see issues)

### Automated Verification Results
✓ Build passes: `make build`
✓ Tests pass: `make test`
✗ Linting issues: `make lint` (3 warnings)

### Code Review Findings

#### Matches Plan:
- Database migration correctly adds [table]
- API endpoints implement specified methods
- Error handling follows plan

#### Deviations from Plan:
- Used different variable names in [file:line]
- Added extra validation in [file:line] (improvement)

#### Potential Issues:
- Missing index on foreign key could impact performance
- No rollback handling in migration

### Manual Testing Required:
1. UI functionality:
   - [ ] Verify [feature] appears correctly
   - [ ] Test error states with invalid input

2. Integration:
   - [ ] Confirm works with existing [component]
   - [ ] Check performance with large datasets

### Recommendations:
- Address linting warnings before merge
- Consider adding integration test for [scenario]
- Document new API endpoints
```

## Working with Existing Context

If you were part of the implementation:
- Review the conversation history
- Check your todo list for what was completed
- Focus validation on work done in this session
- Be honest about any shortcuts or incomplete items

## Important Guidelines

1. **Be thorough but practical** - Focus on what matters
2. **Run all automated checks** - Don't skip verification commands
3. **Document everything** - Both successes and issues
4. **Think critically** - Question if the implementation truly solves the problem
5. **Consider maintenance** - Will this be maintainable long-term?

## Validation Checklist

Always verify:
- [ ] All phases marked complete are actually done
- [ ] Automated tests pass
- [ ] Code follows existing patterns
- [ ] No regressions introduced
- [ ] Error handling is robust
- [ ] Documentation updated if needed
- [ ] Manual test steps are clear

## Relationship to Other Commands

Recommended workflow:
1. `/implement_plan` - Execute the implementation
2. `/commit` - Create atomic commits for changes
3. `/validate_plan` - Verify implementation correctness
4. `/describe_pr` - Generate PR description

The validation works best after commits are made, as it can analyze the git history to understand what was implemented.

Remember: Good validation catches issues before they reach production. Be constructive but thorough in identifying gaps or improvements.
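The evidence-gathering pass this command describes boils down to a few shell commands; a minimal sketch, assuming a Makefile with `check` and `test` targets as referenced above (`HEAD~5` is only a placeholder for whatever range covers the implementation commits):

```bash
# Sketch of the evidence-gathering step described in validate_plan.md.
cd "$(git rev-parse --show-toplevel)"
git log --oneline -n 20        # locate the implementation commits
git diff HEAD~5..HEAD --stat   # HEAD~5 is a placeholder range; adjust to cover the work
make check test                # comprehensive automated verification
```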
10
.claude/settings.json
Normal file
@@ -0,0 +1,10 @@
{
  "permissions": {
    "allow": [
      // "Bash(./hack/spec_metadata.sh)",
      // "Bash(hack/spec_metadata.sh)",
      // "Bash(bash hack/spec_metadata.sh)"
    ]
  },
  "enableAllProjectMcpServers": false
}
31
.claude/settings.local.json
Normal file
@@ -0,0 +1,31 @@
{
  "permissions": {
    "allow": [
      "Read(/Users/mikaylamaki/projects/zed-work/zed-monorepo-real/**)",
      "Read(/Users/nathan/src/agent-client-protocol/rust/**)",
      "Read(/Users/nathan/src/agent-client-protocol/rust/**)",
      "Read(/Users/nathan/src/agent-client-protocol/rust/**)",
      "Read(/Users/nathan/src/agent-client-protocol/rust/**)",
      "Bash(git add:*)",
      "Read(/Users/nathan/src/agent-client-protocol/rust/**)",
      "Bash(./script/spec_metadata.sh:*)",
      "Bash(npm run generate:*)",
      "Bash(npm run typecheck:*)",
      "Bash(npm run:*)",
      "Bash(npm install)",
      "Bash(grep:*)",
      "Bash(find:*)",
      "Bash(node:*)",
      "Bash(cargo check:*)",
      "Bash(cargo test)",
      "Bash(npx tsc:*)"
    ],
    "additionalDirectories": [
      "/Users/mikaylamaki/projects/zed-work/zed-monorepo-real/claude-code-acp/",
      "/Users/mikaylamaki/projects/zed-work/zed-monorepo-real/agentic-coding-protocol/",
      "/Users/nathan/src/agent",
      "/Users/nathan/src/agent-client-protocol/",
      "/Users/nathan/src/claude-code-acp"
    ]
  }
}
@@ -24,9 +24,9 @@ workspace-members = [
third-party = [
    { name = "reqwest", version = "0.11.27" },
    # build of remote_server should not include scap / its x11 dependency
    { name = "zed-scap", git = "https://github.com/zed-industries/scap", rev = "4afea48c3b002197176fb19cd0f9b180dd36eaac", version = "0.0.8-zed" },
    { name = "scap", git = "https://github.com/zed-industries/scap", rev = "808aa5c45b41e8f44729d02e38fd00a2fe2722e7" },
    # build of remote_server should not need to include on libalsa through rodio
    { name = "rodio", git = "https://github.com/RustAudio/rodio" },
    { name = "rodio" },
]

[final-excludes]
@@ -37,6 +37,9 @@ workspace-members = [
    "zed_glsl",
    "zed_html",
    "zed_proto",
    "zed_ruff",
    "slash_commands_example",
    "zed_snippets",
    "zed_test_extension",
    "zed_toml",
]
3
.gitattributes
vendored
@@ -1,5 +1,2 @@
# Prevent GitHub from displaying comments within JSON files as errors.
*.json linguist-language=JSON-with-Comments

# Ensure the WSL script always has LF line endings, even on Windows
crates/zed/resources/windows/zed.sh text eol=lf
35
.github/ISSUE_TEMPLATE/07_bug_windows.yml
vendored
@@ -1,35 +0,0 @@
name: Bug Report (Windows)
description: Zed Windows Related Bugs
type: "Bug"
labels: ["windows"]
title: "Windows: <a short description of the Windows bug>"
body:
  - type: textarea
    attributes:
      label: Summary
      description: Describe the bug with a one-line summary, and provide detailed reproduction steps
      value: |
        <!-- Please insert a one-line summary of the issue below -->
        SUMMARY_SENTENCE_HERE

        ### Description
        <!-- Describe with sufficient detail to reproduce from a clean Zed install. -->
        Steps to trigger the problem:
        1.
        2.
        3.

        **Expected Behavior**:
        **Actual Behavior**:

    validations:
      required: true
  - type: textarea
    id: environment
    attributes:
      label: Zed Version and System Specs
      description: 'Open Zed, and in the command palette select "zed: copy system specs into clipboard"'
      placeholder: |
        Output of "zed: copy system specs into clipboard"
    validations:
      required: true
@@ -1,8 +1,8 @@
name: Bug Report (Windows Beta)
description: Zed Windows Beta Related Bugs
name: Bug Report (Windows Alpha)
description: Zed Windows Alpha Related Bugs
type: "Bug"
labels: ["windows"]
title: "Windows Beta: <a short description of the Windows bug>"
title: "Windows Alpha: <a short description of the Windows bug>"
body:
  - type: textarea
    attributes:
2
.github/actions/run_tests/action.yml
vendored
@@ -20,4 +20,4 @@ runs:

    - name: Run tests
      shell: bash -euxo pipefail {0}
      run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final
      run: cargo nextest run --workspace --no-fail-fast
161
.github/actions/run_tests_windows/action.yml
vendored
@@ -20,8 +20,167 @@ runs:
      with:
        node-version: "18"

    - name: Configure crash dumps
      shell: powershell
      run: |
        # Record the start time for this CI run
        $runStartTime = Get-Date
        $runStartTimeStr = $runStartTime.ToString("yyyy-MM-dd HH:mm:ss")
        Write-Host "CI run started at: $runStartTimeStr"

        # Save the timestamp for later use
        echo "CI_RUN_START_TIME=$($runStartTime.Ticks)" >> $env:GITHUB_ENV

        # Create crash dump directory in workspace (non-persistent)
        $dumpPath = "$env:GITHUB_WORKSPACE\crash_dumps"
        New-Item -ItemType Directory -Force -Path $dumpPath | Out-Null

        Write-Host "Setting up crash dump detection..."
        Write-Host "Workspace dump path: $dumpPath"

        # Note: We're NOT modifying registry on stateful runners
        # Instead, we'll check default Windows crash locations after tests

    - name: Run tests
      shell: powershell
      working-directory: ${{ inputs.working-directory }}
      run: |
      run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final
        $env:RUST_BACKTRACE = "full"

        # Enable Windows debugging features
        $env:_NT_SYMBOL_PATH = "srv*https://msdl.microsoft.com/download/symbols"

        # .NET crash dump environment variables (ephemeral)
        $env:COMPlus_DbgEnableMiniDump = "1"
        $env:COMPlus_DbgMiniDumpType = "4"
        $env:COMPlus_CreateDumpDiagnostics = "1"

        cargo nextest run --workspace --no-fail-fast

    - name: Analyze crash dumps
      if: always()
      shell: powershell
      run: |
        Write-Host "Checking for crash dumps..."

        # Get the CI run start time from the environment
        $runStartTime = [DateTime]::new([long]$env:CI_RUN_START_TIME)
        Write-Host "Only analyzing dumps created after: $($runStartTime.ToString('yyyy-MM-dd HH:mm:ss'))"

        # Check all possible crash dump locations
        $searchPaths = @(
          "$env:GITHUB_WORKSPACE\crash_dumps",
          "$env:LOCALAPPDATA\CrashDumps",
          "$env:TEMP",
          "$env:GITHUB_WORKSPACE",
          "$env:USERPROFILE\AppData\Local\CrashDumps",
          "C:\Windows\System32\config\systemprofile\AppData\Local\CrashDumps"
        )

        $dumps = @()
        foreach ($path in $searchPaths) {
          if (Test-Path $path) {
            Write-Host "Searching in: $path"
            $found = Get-ChildItem "$path\*.dmp" -ErrorAction SilentlyContinue | Where-Object {
              $_.CreationTime -gt $runStartTime
            }
            if ($found) {
              $dumps += $found
              Write-Host "  Found $($found.Count) dump(s) from this CI run"
            }
          }
        }

        if ($dumps) {
          Write-Host "Found $($dumps.Count) crash dump(s)"

          # Install debugging tools if not present
          $cdbPath = "C:\Program Files (x86)\Windows Kits\10\Debuggers\x64\cdb.exe"
          if (-not (Test-Path $cdbPath)) {
            Write-Host "Installing Windows Debugging Tools..."
            $url = "https://go.microsoft.com/fwlink/?linkid=2237387"
            Invoke-WebRequest -Uri $url -OutFile winsdksetup.exe
            Start-Process -Wait winsdksetup.exe -ArgumentList "/features OptionId.WindowsDesktopDebuggers /quiet"
          }

          foreach ($dump in $dumps) {
            Write-Host "`n=================================="
            Write-Host "Analyzing crash dump: $($dump.Name)"
            Write-Host "Size: $([math]::Round($dump.Length / 1MB, 2)) MB"
            Write-Host "Time: $($dump.CreationTime)"
            Write-Host "=================================="

            # Set symbol path
            $env:_NT_SYMBOL_PATH = "srv*C:\symbols*https://msdl.microsoft.com/download/symbols"

            # Run analysis
            $analysisOutput = & $cdbPath -z $dump.FullName -c "!analyze -v; ~*k; lm; q" 2>&1 | Out-String

            # Extract key information
            if ($analysisOutput -match "ExceptionCode:\s*([\w]+)") {
              Write-Host "Exception Code: $($Matches[1])"
              if ($Matches[1] -eq "c0000005") {
                Write-Host "Exception Type: ACCESS VIOLATION"
              }
            }

            if ($analysisOutput -match "EXCEPTION_RECORD:\s*(.+)") {
              Write-Host "Exception Record: $($Matches[1])"
            }

            if ($analysisOutput -match "FAULTING_IP:\s*\n(.+)") {
              Write-Host "Faulting Instruction: $($Matches[1])"
            }

            # Save full analysis
            $analysisFile = "$($dump.FullName).analysis.txt"
            $analysisOutput | Out-File -FilePath $analysisFile
            Write-Host "`nFull analysis saved to: $analysisFile"

            # Print stack trace section
            Write-Host "`n--- Stack Trace Preview ---"
            $stackSection = $analysisOutput -split "STACK_TEXT:" | Select-Object -Last 1
            $stackLines = $stackSection -split "`n" | Select-Object -First 20
            $stackLines | ForEach-Object { Write-Host $_ }
            Write-Host "--- End Stack Trace Preview ---"
          }

          Write-Host "`n⚠️ Crash dumps detected! Download the 'crash-dumps' artifact for detailed analysis."

          # Copy dumps to workspace for artifact upload
          $artifactPath = "$env:GITHUB_WORKSPACE\crash_dumps_collected"
          New-Item -ItemType Directory -Force -Path $artifactPath | Out-Null

          foreach ($dump in $dumps) {
            $destName = "$($dump.Directory.Name)_$($dump.Name)"
            Copy-Item $dump.FullName -Destination "$artifactPath\$destName"
            if (Test-Path "$($dump.FullName).analysis.txt") {
              Copy-Item "$($dump.FullName).analysis.txt" -Destination "$artifactPath\$destName.analysis.txt"
            }
          }

          Write-Host "Copied $($dumps.Count) dump(s) to artifact directory"
        } else {
          Write-Host "No crash dumps from this CI run found"
        }

    - name: Upload crash dumps
      if: always()
      uses: actions/upload-artifact@v4
      with:
        name: crash-dumps-${{ github.run_id }}-${{ github.run_attempt }}
        path: |
          crash_dumps_collected/*.dmp
          crash_dumps_collected/*.txt
        if-no-files-found: ignore
        retention-days: 7

    - name: Check test results
      shell: powershell
      working-directory: ${{ inputs.working-directory }}
      run: |
        # Re-check test results to fail the job if tests failed
        if ($LASTEXITCODE -ne 0) {
          Write-Host "Tests failed with exit code: $LASTEXITCODE"
          exit $LASTEXITCODE
        }
51
.github/workflows/ci.yml
vendored
@@ -81,7 +81,6 @@ jobs:
          echo "run_license=false" >> "$GITHUB_OUTPUT"

          echo "$CHANGED_FILES" | grep -qP '^(nix/|flake\.|Cargo\.|rust-toolchain.toml|\.cargo/config.toml)' && \
          echo "$GITHUB_REF_NAME" | grep -qvP '^v[0-9]+\.[0-9]+\.[0-9x](-pre)?$' && \
          echo "run_nix=true" >> "$GITHUB_OUTPUT" || \
          echo "run_nix=false" >> "$GITHUB_OUTPUT"

@@ -373,46 +372,6 @@
        if: always()
        run: rm -rf ./../.cargo

  doctests:
    # Nextest currently doesn't support doctests, so run them separately and in parallel.
    timeout-minutes: 60
    name: (Linux) Run doctests
    needs: [job_spec]
    if: |
      github.repository_owner == 'zed-industries' &&
      needs.job_spec.outputs.run_tests == 'true'
    runs-on:
      - namespace-profile-16x32-ubuntu-2204
    steps:
      - name: Add Rust to the PATH
        run: echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"

      - name: Checkout repo
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
        with:
          clean: false

      - name: Cache dependencies
        uses: swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
        with:
          save-if: ${{ github.ref == 'refs/heads/main' }}
          # cache-provider: "buildjet"

      - name: Install Linux dependencies
        run: ./script/linux

      - name: Configure CI
        run: |
          mkdir -p ./../.cargo
          cp ./.cargo/ci-config.toml ./../.cargo/config.toml

      - name: Run doctests
        run: cargo test --workspace --doc --no-fail-fast

      - name: Clean CI config file
        if: always()
        run: rm -rf ./../.cargo

  build_remote_server:
    timeout-minutes: 60
    name: (Linux) Build Remote Server
@@ -826,9 +785,8 @@
    timeout-minutes: 120
    name: Create a Windows installer
    runs-on: [self-32vcpu-windows-2022]
    if: |
      ( startsWith(github.ref, 'refs/tags/v')
      || contains(github.event.pull_request.labels.*.name, 'run-bundling') )
    if: contains(github.event.pull_request.labels.*.name, 'run-bundling')
    # if: (startsWith(github.ref, 'refs/tags/v') || contains(github.event.pull_request.labels.*.name, 'run-bundling'))
    needs: [windows_tests]
    env:
      AZURE_TENANT_ID: ${{ secrets.AZURE_SIGNING_TENANT_ID }}
@@ -866,12 +824,13 @@
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
        if: contains(github.event.pull_request.labels.*.name, 'run-bundling')
        with:
          name: Zed_${{ github.event.pull_request.head.sha || github.sha }}-x86_64.exe
          name: ZedEditorUserSetup-x64-${{ github.event.pull_request.head.sha || github.sha }}.exe
          path: ${{ env.SETUP_PATH }}

      - name: Upload Artifacts to release
        uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1
        if: ${{ !(contains(github.event.pull_request.labels.*.name, 'run-bundling')) }}
        # Re-enable when we are ready to publish windows preview releases
        if: ${{ !(contains(github.event.pull_request.labels.*.name, 'run-bundling')) && env.RELEASE_CHANNEL == 'preview' }} # upload only preview
        with:
          draft: true
          prerelease: ${{ env.RELEASE_CHANNEL == 'preview' }}
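For reference, the doctests job removed above ran the documentation tests with the CI cargo config copied alongside the checkout; reproducing that locally is roughly the following sketch, using only the commands from the removed job and assuming it is run from the repository root:

```bash
# Sketch: what the removed (Linux) doctests job did, run locally from the repo root.
mkdir -p ../.cargo
cp ./.cargo/ci-config.toml ../.cargo/config.toml   # mirror the CI cargo config
cargo test --workspace --doc --no-fail-fast
rm -rf ../.cargo                                   # clean up, as the job's final step did
```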
@@ -1,48 +0,0 @@
name: Community Champion Auto Labeler

on:
  issues:
    types: [opened]
  pull_request_target:
    types: [opened]

jobs:
  label_community_champion:
    if: github.repository_owner == 'zed-industries'
    runs-on: ubuntu-latest
    steps:
      - name: Check if author is a community champion and apply label
        uses: actions/github-script@v7
        with:
          script: |
            const communityChampionBody = `${{ secrets.COMMUNITY_CHAMPIONS }}`;

            const communityChampions = communityChampionBody
              .split('\n')
              .map(handle => handle.trim().toLowerCase());

            let author;
            if (context.eventName === 'issues') {
              author = context.payload.issue.user.login;
            } else if (context.eventName === 'pull_request_target') {
              author = context.payload.pull_request.user.login;
            }

            if (!author || !communityChampions.includes(author.toLowerCase())) {
              return;
            }

            const issueNumber = context.payload.issue?.number || context.payload.pull_request?.number;

            try {
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issueNumber,
                labels: ['community champion']
              });

              console.log(`Applied 'community champion' label to #${issueNumber} by ${author}`);
            } catch (error) {
              console.error(`Failed to apply label: ${error.message}`);
            }
11
.github/workflows/community_release_actions.yml
vendored
@@ -1,6 +1,3 @@
# IF YOU UPDATE THE NAME OF ANY GITHUB SECRET, YOU MUST CHERRY PICK THE COMMIT
# TO BOTH STABLE AND PREVIEW CHANNELS

name: Release Actions

on:
@@ -16,9 +13,9 @@ jobs:
        id: get-release-url
        run: |
          if [ "${{ github.event.release.prerelease }}" == "true" ]; then
            URL="https://zed.dev/releases/preview"
            URL="https://zed.dev/releases/preview/latest"
          else
            URL="https://zed.dev/releases/stable"
            URL="https://zed.dev/releases/stable/latest"
          fi

          echo "URL=$URL" >> "$GITHUB_OUTPUT"
@@ -35,11 +32,11 @@ jobs:
      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@c840d45a03a323fbc3f7507ac7769dbd91bfb164 # v5.3.0
        with:
          webhook-url: ${{ secrets.DISCORD_WEBHOOK_RELEASE_NOTES }}
          webhook-url: ${{ secrets.DISCORD_WEBHOOK_URL }}
          content: ${{ steps.get-content.outputs.string }}

  send_release_notes_email:
    if: false && github.repository_owner == 'zed-industries' && !github.event.release.prerelease
    if: github.repository_owner == 'zed-industries' && !github.event.release.prerelease
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
57
.github/workflows/congrats.yml
vendored
@@ -1,57 +0,0 @@
name: Congratsbot

on:
  push:
    branches: [main]

jobs:
  check-author:
    if: ${{ github.repository_owner == 'zed-industries' }}
    runs-on: ubuntu-latest
    outputs:
      should_congratulate: ${{ steps.check.outputs.should_congratulate }}
    steps:
      - name: Get PR info and check if author is external
        id: check
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.CONGRATSBOT_GITHUB_TOKEN }}
          script: |
            const { data: prs } = await github.rest.repos.listPullRequestsAssociatedWithCommit({
              owner: context.repo.owner,
              repo: context.repo.repo,
              commit_sha: context.sha
            });

            if (prs.length === 0) {
              core.setOutput('should_congratulate', 'false');
              return;
            }

            const mergedPR = prs.find(pr => pr.merged_at !== null) || prs[0];
            const prAuthor = mergedPR.user.login;

            try {
              await github.rest.teams.getMembershipForUserInOrg({
                org: 'zed-industries',
                team_slug: 'staff',
                username: prAuthor
              });
              core.setOutput('should_congratulate', 'false');
            } catch (error) {
              if (error.status === 404) {
                core.setOutput('should_congratulate', 'true');
              } else {
                console.error(`Error checking team membership: ${error.message}`);
                core.setOutput('should_congratulate', 'false');
              }
            }

  congrats:
    needs: check-author
    if: needs.check-author.outputs.should_congratulate == 'true'
    uses: withastro/automation/.github/workflows/congratsbot.yml@main
    with:
      EMOJIS: 🎉,🎊,🧑🚀,🥳,🙌,🚀,🦀,🔥,🚢
    secrets:
      DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_CONGRATS }}
36
.github/workflows/good_first_issue_notifier.yml
vendored
@@ -1,36 +0,0 @@
name: Good First Issue Notifier

on:
  issues:
    types: [labeled]

jobs:
  handle-good-first-issue:
    if: github.event.label.name == 'good first issue' && github.repository_owner == 'zed-industries'
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Prepare Discord message
        id: prepare-message
        env:
          ISSUE_TITLE: ${{ github.event.issue.title }}
          ISSUE_NUMBER: ${{ github.event.issue.number }}
          ISSUE_URL: ${{ github.event.issue.html_url }}
          ISSUE_AUTHOR: ${{ github.event.issue.user.login }}
        run: |
          MESSAGE="[${ISSUE_TITLE} (#${ISSUE_NUMBER})](<${ISSUE_URL}>)"

          {
            echo "message<<EOF"
            echo "$MESSAGE"
            echo "EOF"
          } >> "$GITHUB_OUTPUT"

      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@c840d45a03a323fbc3f7507ac7769dbd91bfb164 # v5.3.0
        with:
          webhook-url: ${{ secrets.DISCORD_WEBHOOK_GOOD_FIRST_ISSUE }}
          content: ${{ steps.prepare-message.outputs.message }}
33
.github/workflows/issue_response.yml
vendored
Normal file
@@ -0,0 +1,33 @@
name: Issue Response

on:
  schedule:
    - cron: "0 12 * * 2"
  workflow_dispatch:

jobs:
  issue-response:
    if: github.repository_owner == 'zed-industries'
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - uses: pnpm/action-setup@fe02b34f77f8bc703788d5817da081398fad5dd2 # v4.0.0
        with:
          version: 9

      - name: Setup Node
        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
        with:
          node-version: "20"
          cache: "pnpm"
          cache-dependency-path: "script/issue_response/pnpm-lock.yaml"

      - run: pnpm install --dir script/issue_response

      - name: Run Issue Response
        run: pnpm run --dir script/issue_response start
        env:
          ISSUE_RESPONSE_GITHUB_TOKEN: ${{ secrets.ISSUE_RESPONSE_GITHUB_TOKEN }}
          SLACK_ISSUE_RESPONSE_WEBHOOK_URL: ${{ secrets.SLACK_ISSUE_RESPONSE_WEBHOOK_URL }}
1
.gitignore
vendored
@@ -20,7 +20,6 @@
.venv
.vscode
.wrangler
.perf-runs
/assets/*licenses.*
/crates/collab/seed.json
/crates/theme/schemas/theme.json
15
.rules
@@ -12,19 +12,6 @@
- Example: avoid `let _ = client.request(...).await?;` - use `client.request(...).await?;` instead
* When implementing async operations that may fail, ensure errors propagate to the UI layer so users get meaningful feedback.
* Never create files with `mod.rs` paths - prefer `src/some_module.rs` instead of `src/some_module/mod.rs`.
* When creating new crates, prefer specifying the library root path in `Cargo.toml` using `[lib] path = "...rs"` instead of the default `lib.rs`, to maintain consistent and descriptive naming (e.g., `gpui.rs` or `main.rs`).
* Avoid creative additions unless explicitly requested
* Use full words for variable names (no abbreviations like "q" for "queue")
* Use variable shadowing to scope clones in async contexts for clarity, minimizing the lifetime of borrowed references.
  Example:
  ```rust
  executor.spawn({
      let task_ran = task_ran.clone();
      async move {
          *task_ran.borrow_mut() = true;
      }
  });
  ```

# GPUI

@@ -59,7 +46,7 @@ Trying to update an entity while it's already being updated must be avoided as t

When `read_with`, `update`, or `update_in` are used with an async context, the closure's return value is wrapped in an `anyhow::Result`.

`WeakEntity<T>` is a weak handle. It has `read_with`, `update`, and `update_in` methods that work the same, but always return an `anyhow::Result` so that they can fail if the entity no longer exists. This can be useful to avoid memory leaks - if entities have mutually recursive handles to each other they will never be dropped.
`WeakEntity<T>` is a weak handle. It has `read_with`, `update`, and `update_in` methods that work the same, but always return an `anyhow::Result` so that they can fail if the entity no longer exists. This can be useful to avoid memory leaks - if entities have mutually recursive handles to eachother they will never be dropped.

## Concurrency

@@ -1,76 +1,54 @@
# Contributing to Zed

Thank you for helping us make Zed better!
Thanks for your interest in contributing to Zed, the collaborative platform that is also a code editor!

All activity in Zed forums is subject to our [Code of
Conduct](https://zed.dev/code-of-conduct). Additionally, contributors must sign
our [Contributor License Agreement](https://zed.dev/cla) before their
contributions can be merged.
All activity in Zed forums is subject to our [Code of Conduct](https://zed.dev/code-of-conduct). Additionally, contributors must sign our [Contributor License Agreement](https://zed.dev/cla) before their contributions can be merged.

## Contribution ideas

Zed is a large project with a number of priorities. We spend most of
our time working on what we believe the product needs, but we also love working
with the community to improve the product in ways we haven't thought of (or had time to get to yet!)
If you're looking for ideas about what to work on, check out:

In particular we love PRs that are:

- Fixes to existing bugs and issues.
- Small enhancements to existing features, particularly to make them work for more people.
- Small extra features, like keybindings or actions you miss from other editors or extensions.
- Work towards shipping larger features on our roadmap.

If you're looking for concrete ideas:

- Our [top-ranking issues](https://github.com/zed-industries/zed/issues/5393) based on votes by the community.
- Our [public roadmap](https://zed.dev/roadmap) contains a rough outline of our near-term priorities for Zed.
- Our [top-ranking issues](https://github.com/zed-industries/zed/issues/5393) based on votes by the community.

## Sending changes
For adding themes or support for a new language to Zed, check out our [docs on developing extensions](https://zed.dev/docs/extensions/developing-extensions).

The Zed culture values working code and synchronous conversations over long
discussion threads.
## Proposing changes

The best way to get us to take a look at a proposed change is to send a pull
request. We will get back to you (though this sometimes takes longer than we'd
like, sorry).
The best way to propose a change is to [start a discussion on our GitHub repository](https://github.com/zed-industries/zed/discussions).

Although we will take a look, we tend to only merge about half the PRs that are
submitted. If you'd like your PR to have the best chance of being merged:
First, write a short **problem statement**, which _clearly_ and _briefly_ describes the problem you want to solve independently from any specific solution. It doesn't need to be long or formal, but it's difficult to consider a solution in absence of a clear understanding of the problem.

- Include a clear description of what you're solving, and why it's important to you.
- Include tests.
- If it changes the UI, attach screenshots or screen recordings.
Next, write a short **solution proposal**. How can the problem (or set of problems) you have stated above be addressed? What are the pros and cons of your approach? Again, keep it brief and informal. This isn't a specification, but rather a starting point for a conversation.

The internal advice for reviewers is as follows:
By effectively engaging with the Zed team and community early in your process, we're better positioned to give you feedback and understand your pull request once you open it. If the first thing we see from you is a big changeset, we're much less likely to respond to it in a timely manner.

- If the fix/feature is obviously great, and the code is great. Hit merge.
- If the fix/feature is obviously great, and the code is nearly great. Send PR comments, or offer to pair to get things perfect.
- If the fix/feature is not obviously great, or the code needs rewriting from scratch. Close the PR with a thank you and some explanation.
## Pair programming

If you need more feedback from us: the best way is to be responsive to
Github comments, or to offer up time to pair with us.
We plan to set aside time each week to pair program with contributors on promising pull requests in Zed. This will be an experiment. We tend to prefer pairing over async code review on our team, and we'd like to see how well it works in an open source setting. If we're finding it difficult to get on the same page with async review, we may ask you to pair with us if you're open to it. The closer a contribution is to the goals outlined in our roadmap, the more likely we'll be to spend time pairing on it.

If you are making a larger change, or need advice on how to finish the change
you're making, please open the PR early. We would love to help you get
things right, and it's often easier to see how to solve a problem before the
diff gets too big.
## Tips to improve the chances of your PR getting reviewed and merged

## Things we will (probably) not merge
- Discuss your plans ahead of time with the team
- Small, focused, incremental pull requests are much easier to review
- Spend time explaining your changes in the pull request body
- Add test coverage and documentation
- Choose tasks that align with our roadmap
- Pair with us and watch us code to learn the codebase
- Low effort PRs, such as those that just re-arrange syntax, won't be merged without a compelling justification

Although there are few hard and fast rules, typically we don't merge:
## File icons

- Anything that can be provided by an extension. For example a new language, or theme. For adding themes or support for a new language to Zed, check out our [docs on developing extensions](https://zed.dev/docs/extensions/developing-extensions).
- New file icons. Zed's default icon theme consists of icons that are hand-designed to fit together in a cohesive manner, please don't submit PRs with off-the-shelf SVGs.
- Giant refactorings.
- Non-trivial changes with no tests.
- Stylistic code changes that do not alter any app logic. Reducing allocations, removing `.unwrap()`s, fixing typos is great; making code "more readable" — maybe not so much.
- Features where (in our subjective opinion) the extra complexity isn't worth it for the number of people who will benefit.
- Anything that seems completely AI generated.
Zed's default icon theme consists of icons that are hand-designed to fit together in a cohesive manner.

We do not accept PRs for file icons that are just an off-the-shelf SVG taken from somewhere else.

### Adding new icons to the Zed icon theme

If you would like to add a new icon to the Zed icon theme, [open a Discussion](https://github.com/zed-industries/zed/discussions/new?category=ux-and-design) and we can work with you on getting an icon designed and added to Zed.

## Bird's-eye view of Zed

We suggest you keep the [Zed glossary](docs/src/development/glossary.md) at your side when starting out. It lists and explains some of the structures and terms you will see throughout the codebase.

Zed is made up of several smaller crates - let's go over those you're most likely to interact with:

- [`gpui`](/crates/gpui) is a GPU-accelerated UI framework which provides all of the building blocks for Zed. **We recommend familiarizing yourself with the root level GPUI documentation.**
4543
Cargo.lock
generated
137
Cargo.toml
@@ -35,7 +35,6 @@ members = [
    "crates/cloud_api_client",
    "crates/cloud_api_types",
    "crates/cloud_llm_client",
    "crates/cloud_zeta2_prompt",
    "crates/collab",
    "crates/collab_ui",
    "crates/collections",
@@ -53,13 +52,8 @@ members = [
    "crates/debugger_tools",
    "crates/debugger_ui",
    "crates/deepseek",
    "crates/denoise",
    "crates/diagnostics",
    "crates/docs_preprocessor",
    "crates/edit_prediction",
    "crates/edit_prediction_button",
    "crates/edit_prediction_context",
    "crates/zeta2_tools",
    "crates/editor",
    "crates/eval",
    "crates/explorer_command_injector",
@@ -88,20 +82,20 @@ members = [
    "crates/http_client_tls",
    "crates/icons",
    "crates/image_viewer",
    "crates/edit_prediction",
    "crates/edit_prediction_button",
    "crates/inspector_ui",
    "crates/install_cli",
    "crates/jj",
    "crates/jj_ui",
    "crates/journal",
    "crates/json_schema_store",
    "crates/keymap_editor",
    "crates/language",
    "crates/language_extension",
    "crates/language_model",
    "crates/language_models",
    "crates/language_onboarding",
    "crates/language_selector",
    "crates/language_tools",
    "crates/languages",
    "crates/line_ending_selector",
    "crates/livekit_api",
    "crates/livekit_client",
    "crates/lmstudio",
@@ -136,7 +130,6 @@ members = [
    "crates/refineable",
    "crates/refineable/derive_refineable",
    "crates/release_channel",
    "crates/scheduler",
    "crates/remote",
    "crates/remote_server",
    "crates/repl",
@@ -147,10 +140,10 @@ members = [
    "crates/rules_library",
    "crates/schema_generator",
    "crates/search",
    "crates/semantic_index",
    "crates/semantic_version",
    "crates/session",
    "crates/settings",
    "crates/settings_macros",
    "crates/settings_profile_selector",
    "crates/settings_ui",
    "crates/snippet",
@@ -163,10 +156,9 @@ members = [
    "crates/streaming_diff",
    "crates/sum_tree",
    "crates/supermaven",
    "crates/supermaven_api",
    "crates/codestral",
    "crates/svg_preview",
    "crates/system_specs",
    "crates/supermaven_api",
    "crates/svg_preview",
    "crates/tab_switcher",
    "crates/task",
    "crates/tasks_ui",
@@ -199,9 +191,7 @@ members = [
    "crates/x_ai",
    "crates/zed",
    "crates/zed_actions",
    "crates/zed_env_vars",
    "crates/zeta",
    "crates/zeta2",
    "crates/zeta_cli",
    "crates/zlog",
    "crates/zlog_settings",
@@ -213,14 +203,16 @@ members = [
    "extensions/glsl",
    "extensions/html",
    "extensions/proto",
    "extensions/ruff",
    "extensions/slash-commands-example",
    "extensions/snippets",
    "extensions/test-extension",
    "extensions/toml",

    #
    # Tooling
    #

    "tooling/perf",
    "tooling/workspace-hack",
    "tooling/xtask",
]
@@ -271,10 +263,9 @@ clock = { path = "crates/clock" }
cloud_api_client = { path = "crates/cloud_api_client" }
cloud_api_types = { path = "crates/cloud_api_types" }
cloud_llm_client = { path = "crates/cloud_llm_client" }
cloud_zeta2_prompt = { path = "crates/cloud_zeta2_prompt" }
collab = { path = "crates/collab" }
collab_ui = { path = "crates/collab_ui" }
collections = { path = "crates/collections", version = "0.1.0" }
collections = { path = "crates/collections" }
command_palette = { path = "crates/command_palette" }
command_palette_hooks = { path = "crates/command_palette_hooks" }
component = { path = "crates/component" }
@@ -282,7 +273,6 @@ context_server = { path = "crates/context_server" }
copilot = { path = "crates/copilot" }
crashes = { path = "crates/crashes" }
credentials_provider = { path = "crates/credentials_provider" }
crossbeam = "0.8.4"
dap = { path = "crates/dap" }
dap_adapters = { path = "crates/dap_adapters" }
db = { path = "crates/db" }
@@ -290,7 +280,6 @@ debug_adapter_extension = { path = "crates/debug_adapter_extension" }
debugger_tools = { path = "crates/debugger_tools" }
debugger_ui = { path = "crates/debugger_ui" }
deepseek = { path = "crates/deepseek" }
derive_refineable = { path = "crates/refineable/derive_refineable" }
diagnostics = { path = "crates/diagnostics" }
editor = { path = "crates/editor" }
extension = { path = "crates/extension" }
@@ -308,7 +297,9 @@ git_hosting_providers = { path = "crates/git_hosting_providers" }
git_ui = { path = "crates/git_ui" }
go_to_line = { path = "crates/go_to_line" }
google_ai = { path = "crates/google_ai" }
gpui = { path = "crates/gpui", default-features = false }
gpui = { path = "crates/gpui", default-features = false, features = [
    "http_client",
] }
gpui_macros = { path = "crates/gpui_macros" }
gpui_tokio = { path = "crates/gpui_tokio" }
html_to_markdown = { path = "crates/html_to_markdown" }
@@ -318,22 +309,18 @@ icons = { path = "crates/icons" }
image_viewer = { path = "crates/image_viewer" }
edit_prediction = { path = "crates/edit_prediction" }
edit_prediction_button = { path = "crates/edit_prediction_button" }
edit_prediction_context = { path = "crates/edit_prediction_context" }
zeta2_tools = { path = "crates/zeta2_tools" }
inspector_ui = { path = "crates/inspector_ui" }
install_cli = { path = "crates/install_cli" }
jj = { path = "crates/jj" }
jj_ui = { path = "crates/jj_ui" }
journal = { path = "crates/journal" }
json_schema_store = { path = "crates/json_schema_store" }
keymap_editor = { path = "crates/keymap_editor" }
language = { path = "crates/language" }
language_extension = { path = "crates/language_extension" }
language_model = { path = "crates/language_model" }
language_models = { path = "crates/language_models" }
language_onboarding = { path = "crates/language_onboarding" }
language_selector = { path = "crates/language_selector" }
language_tools = { path = "crates/language_tools" }
languages = { path = "crates/languages" }
line_ending_selector = { path = "crates/line_ending_selector" }
livekit_api = { path = "crates/livekit_api" }
livekit_client = { path = "crates/livekit_client" }
lmstudio = { path = "crates/lmstudio" }
@@ -358,7 +345,6 @@ outline = { path = "crates/outline" }
outline_panel = { path = "crates/outline_panel" }
panel = { path = "crates/panel" }
paths = { path = "crates/paths" }
perf = { path = "tooling/perf" }
picker = { path = "crates/picker" }
plugin = { path = "crates/plugin" }
plugin_macros = { path = "crates/plugin_macros" }
@@ -372,21 +358,20 @@ proto = { path = "crates/proto" }
recent_projects = { path = "crates/recent_projects" }
refineable = { path = "crates/refineable" }
release_channel = { path = "crates/release_channel" }
scheduler = { path = "crates/scheduler" }
remote = { path = "crates/remote" }
remote_server = { path = "crates/remote_server" }
repl = { path = "crates/repl" }
reqwest_client = { path = "crates/reqwest_client" }
rich_text = { path = "crates/rich_text" }
rodio = { git = "https://github.com/RustAudio/rodio" }
rodio = { version = "0.21.1", default-features = false }
rope = { path = "crates/rope" }
rpc = { path = "crates/rpc" }
rules_library = { path = "crates/rules_library" }
search = { path = "crates/search" }
semantic_index = { path = "crates/semantic_index" }
semantic_version = { path = "crates/semantic_version" }
session = { path = "crates/session" }
settings = { path = "crates/settings" }
settings_macros = { path = "crates/settings_macros" }
settings_ui = { path = "crates/settings_ui" }
snippet = { path = "crates/snippet" }
snippet_provider = { path = "crates/snippet_provider" }
@@ -399,7 +384,6 @@ streaming_diff = { path = "crates/streaming_diff" }
sum_tree = { path = "crates/sum_tree" }
|
||||
supermaven = { path = "crates/supermaven" }
|
||||
supermaven_api = { path = "crates/supermaven_api" }
|
||||
codestral = { path = "crates/codestral" }
|
||||
system_specs = { path = "crates/system_specs" }
|
||||
tab_switcher = { path = "crates/tab_switcher" }
|
||||
task = { path = "crates/task" }
|
||||
@@ -434,9 +418,7 @@ worktree = { path = "crates/worktree" }
|
||||
x_ai = { path = "crates/x_ai" }
|
||||
zed = { path = "crates/zed" }
|
||||
zed_actions = { path = "crates/zed_actions" }
|
||||
zed_env_vars = { path = "crates/zed_env_vars" }
|
||||
zeta = { path = "crates/zeta" }
|
||||
zeta2 = { path = "crates/zeta2" }
|
||||
zlog = { path = "crates/zlog" }
|
||||
zlog_settings = { path = "crates/zlog_settings" }
|
||||
|
||||
@@ -444,9 +426,9 @@ zlog_settings = { path = "crates/zlog_settings" }
|
||||
# External crates
|
||||
#
|
||||
|
||||
agent-client-protocol = { version = "0.4.3", features = ["unstable"] }
|
||||
agent-client-protocol = { path = "../agent-client-protocol" }
|
||||
aho-corasick = "1.1"
|
||||
alacritty_terminal = "0.25.1-rc1"
|
||||
alacritty_terminal = { git = "https://github.com/zed-industries/alacritty.git", branch = "add-hush-login-flag" }
|
||||
any_vec = "0.14"
|
||||
anyhow = "1.0.86"
|
||||
arrayvec = { version = "0.7.4", features = ["serde"] }
|
||||
@@ -458,7 +440,6 @@ async-fs = "2.1"
|
||||
async-pipe = { git = "https://github.com/zed-industries/async-pipe-rs", rev = "82d00a04211cf4e1236029aa03e6b6ce2a74c553" }
|
||||
async-recursion = "1.0.0"
|
||||
async-tar = "0.5.0"
|
||||
async-task = "4.7"
|
||||
async-trait = "0.1"
|
||||
async-tungstenite = "0.29.1"
|
||||
async_zip = { version = "0.0.17", features = ["deflate", "deflate64"] }
|
||||
@@ -471,17 +452,16 @@ aws-sdk-bedrockruntime = { version = "1.80.0", features = [
|
||||
] }
|
||||
aws-smithy-runtime-api = { version = "1.7.4", features = ["http-1x", "client"] }
|
||||
aws-smithy-types = { version = "1.3.0", features = ["http-body-1-x"] }
|
||||
backtrace = "0.3"
|
||||
base64 = "0.22"
|
||||
bincode = "1.2.1"
|
||||
bitflags = "2.6.0"
|
||||
blade-graphics = { version = "0.7.0" }
|
||||
blade-macros = { version = "0.3.0" }
|
||||
blade-util = { version = "0.3.0" }
|
||||
blade-graphics = { git = "https://github.com/kvark/blade", rev = "e0ec4e720957edd51b945b64dd85605ea54bcfe5" }
|
||||
blade-macros = { git = "https://github.com/kvark/blade", rev = "e0ec4e720957edd51b945b64dd85605ea54bcfe5" }
|
||||
blade-util = { git = "https://github.com/kvark/blade", rev = "e0ec4e720957edd51b945b64dd85605ea54bcfe5" }
|
||||
blake3 = "1.5.3"
|
||||
bytes = "1.0"
|
||||
cargo_metadata = "0.19"
|
||||
cargo_toml = "0.21"
|
||||
cfg-if = "1.0.3"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
ciborium = "0.2"
|
||||
circular-buffer = "1.0"
|
||||
@@ -514,7 +494,6 @@ futures-lite = "1.13"
|
||||
git2 = { version = "0.20.1", default-features = false }
|
||||
globset = "0.4"
|
||||
handlebars = "4.3"
|
||||
hashbrown = "0.15.3"
|
||||
heck = "0.5"
|
||||
heed = { version = "0.21.0", features = ["read-txn-no-tls"] }
|
||||
hex = "0.4.3"
|
||||
@@ -530,6 +509,7 @@ indexmap = { version = "2.7.0", features = ["serde"] }
|
||||
indoc = "2"
|
||||
inventory = "0.3.19"
|
||||
itertools = "0.14.0"
|
||||
jj-lib = { git = "https://github.com/jj-vcs/jj", rev = "e18eb8e05efaa153fad5ef46576af145bba1807f" }
|
||||
json_dotpath = "1.1"
|
||||
jsonschema = "0.30.0"
|
||||
jsonwebtoken = "9.3"
|
||||
@@ -539,7 +519,7 @@ libc = "0.2"
|
||||
libsqlite3-sys = { version = "0.30.1", features = ["bundled"] }
|
||||
linkify = "0.10.0"
|
||||
log = { version = "0.4.16", features = ["kv_unstable_serde", "serde"] }
|
||||
lsp-types = { git = "https://github.com/zed-industries/lsp-types", rev = "0874f8742fe55b4dc94308c1e3c0069710d8eeaf" }
|
||||
lsp-types = { git = "https://github.com/zed-industries/lsp-types", rev = "39f629bdd03d59abd786ed9fc27e8bca02c0c0ec" }
|
||||
mach2 = "0.5"
|
||||
markup5ever_rcdom = "0.3.0"
|
||||
metal = "0.29"
|
||||
@@ -550,33 +530,7 @@ nanoid = "0.4"
|
||||
nbformat = { git = "https://github.com/ConradIrwin/runtimed", rev = "7130c804216b6914355d15d0b91ea91f6babd734" }
|
||||
nix = "0.29"
|
||||
num-format = "0.4.4"
|
||||
num-traits = "0.2"
|
||||
objc = "0.2"
|
||||
objc2-foundation = { version = "0.3", default-features = false, features = [
|
||||
"NSArray",
|
||||
"NSAttributedString",
|
||||
"NSBundle",
|
||||
"NSCoder",
|
||||
"NSData",
|
||||
"NSDate",
|
||||
"NSDictionary",
|
||||
"NSEnumerator",
|
||||
"NSError",
|
||||
"NSGeometry",
|
||||
"NSNotification",
|
||||
"NSNull",
|
||||
"NSObjCRuntime",
|
||||
"NSObject",
|
||||
"NSProcessInfo",
|
||||
"NSRange",
|
||||
"NSRunLoop",
|
||||
"NSString",
|
||||
"NSURL",
|
||||
"NSUndoManager",
|
||||
"NSValue",
|
||||
"objc2-core-foundation",
|
||||
"std"
|
||||
] }
|
||||
open = "5.0.0"
|
||||
ordered-float = "2.1.1"
|
||||
palette = { version = "0.7.5", default-features = false, features = ["std"] }
|
||||
@@ -592,7 +546,6 @@ pet-fs = { git = "https://github.com/microsoft/python-environment-tools.git", re
|
||||
pet-pixi = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
|
||||
pet-poetry = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
|
||||
pet-reporter = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
|
||||
pet-virtualenv = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "845945b830297a50de0e24020b980a65e4820559" }
|
||||
portable-pty = "0.9.0"
|
||||
postage = { version = "0.5", features = ["futures-traits"] }
|
||||
pretty_assertions = { version = "1.3.0", features = ["unstable"] }
|
||||
@@ -603,12 +556,11 @@ prost-build = "0.9"
|
||||
prost-types = "0.9"
|
||||
pulldown-cmark = { version = "0.12.0", default-features = false }
|
||||
quote = "1.0.9"
|
||||
rand = "0.9"
|
||||
rand = "0.8.5"
|
||||
rayon = "1.8"
|
||||
ref-cast = "1.0.24"
|
||||
regex = "1.5"
|
||||
# WARNING: If you change this, you must also publish a new version of zed-reqwest to crates.io
|
||||
reqwest = { git = "https://github.com/zed-industries/reqwest.git", rev = "c15662463bda39148ba154100dd44d3fba5873a4", default-features = false, features = [
|
||||
reqwest = { git = "https://github.com/zed-industries/reqwest.git", rev = "951c770a32f1998d6e999cef3e59e0013e6c4415", default-features = false, features = [
|
||||
"charset",
|
||||
"http2",
|
||||
"macos-system-configuration",
|
||||
@@ -616,34 +568,32 @@ reqwest = { git = "https://github.com/zed-industries/reqwest.git", rev = "c15662
|
||||
"rustls-tls-native-roots",
|
||||
"socks",
|
||||
"stream",
|
||||
], package = "zed-reqwest", version = "0.12.15-zed" }
|
||||
] }
|
||||
rsa = "0.9.6"
|
||||
runtimelib = { git = "https://github.com/ConradIrwin/runtimed", rev = "7130c804216b6914355d15d0b91ea91f6babd734", default-features = false, features = [
|
||||
"async-dispatcher-runtime",
|
||||
] }
|
||||
rust-embed = { version = "8.4", features = ["include-exclude"] }
|
||||
rustc-demangle = "0.1.23"
|
||||
rustc-hash = "2.1.0"
|
||||
rustls = { version = "0.23.26" }
|
||||
rustls-platform-verifier = "0.5.0"
|
||||
# WARNING: If you change this, you must also publish a new version of zed-scap to crates.io
|
||||
scap = { git = "https://github.com/zed-industries/scap", rev = "4afea48c3b002197176fb19cd0f9b180dd36eaac", default-features = false, package = "zed-scap", version = "0.0.8-zed" }
|
||||
scap = { git = "https://github.com/zed-industries/scap", rev = "808aa5c45b41e8f44729d02e38fd00a2fe2722e7", default-features = false }
|
||||
schemars = { version = "1.0", features = ["indexmap2"] }
|
||||
semver = "1.0"
|
||||
serde = { version = "1.0.221", features = ["derive", "rc"] }
|
||||
serde_json = { version = "1.0.144", features = ["preserve_order", "raw_value"] }
|
||||
serde = { version = "1.0", features = ["derive", "rc"] }
|
||||
serde_derive = { version = "1.0", features = ["deserialize_in_place"] }
|
||||
serde_json = { version = "1.0", features = ["preserve_order", "raw_value"] }
|
||||
serde_json_lenient = { version = "0.2", features = [
|
||||
"preserve_order",
|
||||
"raw_value",
|
||||
] }
|
||||
serde_path_to_error = "0.1.17"
|
||||
serde_repr = "0.1"
|
||||
serde_urlencoded = "0.7"
|
||||
serde_with = "3.4.0"
|
||||
sha2 = "0.10"
|
||||
shellexpand = "2.1.0"
|
||||
shlex = "1.3.0"
|
||||
simplelog = "0.12.2"
|
||||
slotmap = "1.0.6"
|
||||
smallvec = { version = "1.6", features = ["union"] }
|
||||
smol = "2.0"
|
||||
sqlformat = "0.2"
|
||||
@@ -652,9 +602,9 @@ streaming-iterator = "0.1"
|
||||
strsim = "0.11"
|
||||
strum = { version = "0.27.0", features = ["derive"] }
|
||||
subtle = "2.5.0"
|
||||
syn = { version = "2.0.101", features = ["full", "extra-traits", "visit-mut"] }
|
||||
syn = { version = "2.0.101", features = ["full", "extra-traits"] }
|
||||
sys-locale = "0.3.1"
|
||||
sysinfo = "0.37.0"
|
||||
sysinfo = "0.31.0"
|
||||
take-until = "0.2.0"
|
||||
tempfile = "3.20.0"
|
||||
thiserror = "2.0.12"
|
||||
@@ -670,9 +620,8 @@ tiny_http = "0.8"
|
||||
tokio = { version = "1" }
|
||||
tokio-tungstenite = { version = "0.26", features = ["__rustls-tls"] }
|
||||
toml = "0.8"
|
||||
toml_edit = { version = "0.22", default-features = false, features = ["display", "parse", "serde"] }
|
||||
tower-http = "0.4.4"
|
||||
tree-sitter = { version = "0.25.10", features = ["wasm"] }
|
||||
tree-sitter = { version = "0.25.6", features = ["wasm"] }
|
||||
tree-sitter-bash = "0.25.0"
|
||||
tree-sitter-c = "0.23"
|
||||
tree-sitter-cpp = { git = "https://github.com/tree-sitter/tree-sitter-cpp", rev = "5cb9b693cfd7bfacab1d9ff4acac1a4150700609" }
|
||||
@@ -689,11 +638,11 @@ tree-sitter-html = "0.23"
|
||||
tree-sitter-jsdoc = "0.23"
|
||||
tree-sitter-json = "0.24"
|
||||
tree-sitter-md = { git = "https://github.com/tree-sitter-grammars/tree-sitter-markdown", rev = "9a23c1a96c0513d8fc6520972beedd419a973539" }
|
||||
tree-sitter-python = "0.25"
|
||||
tree-sitter-python = { git = "https://github.com/zed-industries/tree-sitter-python", rev = "218fcbf3fda3d029225f3dec005cb497d111b35e" }
|
||||
tree-sitter-regex = "0.24"
|
||||
tree-sitter-ruby = "0.23"
|
||||
tree-sitter-rust = "0.24"
|
||||
tree-sitter-typescript = { git = "https://github.com/zed-industries/tree-sitter-typescript", rev = "e2c53597d6a5d9cf7bbe8dccde576fe1e46c5899" } # https://github.com/tree-sitter/tree-sitter-typescript/pull/347
|
||||
tree-sitter-typescript = "0.23"
|
||||
tree-sitter-yaml = { git = "https://github.com/zed-industries/tree-sitter-yaml", rev = "baff0b51c64ef6a1fb1f8390f3ad6015b83ec13a" }
|
||||
unicase = "2.6"
|
||||
unicode-script = "0.5.7"
|
||||
@@ -720,7 +669,6 @@ windows-core = "0.61"
wit-component = "0.221"
workspace-hack = "0.1.0"
yawc = "0.2.5"
zeroize = "1.8"
zstd = "0.11"

[workspace.dependencies.windows]
@@ -743,11 +691,9 @@ features = [
"Win32_Graphics_Dxgi_Common",
"Win32_Graphics_Gdi",
"Win32_Graphics_Imaging",
"Win32_Graphics_Hlsl",
"Win32_Networking_WinSock",
"Win32_Security",
"Win32_Security_Credentials",
"Win32_Security_Cryptography",
"Win32_Storage_FileSystem",
"Win32_System_Com",
"Win32_System_Com_StructuredStorage",
@@ -816,7 +762,6 @@ image_viewer = { codegen-units = 1 }
edit_prediction_button = { codegen-units = 1 }
install_cli = { codegen-units = 1 }
journal = { codegen-units = 1 }
json_schema_store = { codegen-units = 1 }
lmstudio = { codegen-units = 1 }
menu = { codegen-units = 1 }
notifications = { codegen-units = 1 }
@@ -868,7 +813,6 @@ todo = "deny"
declare_interior_mutable_const = "deny"

redundant_clone = "deny"
disallowed_methods = "deny"

# We currently do not restrict any style rules
# as it slows down shipping code to Zed.
@@ -898,9 +842,6 @@ too_many_arguments = "allow"
# We often have large enum variants yet we rarely actually bother with splitting them up.
large_enum_variant = "allow"

# Boolean expressions can be hard to read, requiring only the minimal form gets in the way
nonminimal_bool = "allow"

[workspace.metadata.cargo-machete]
ignored = [
"bindgen",

Cross.toml (new file, +2)
@@ -0,0 +1,2 @@
[build]
dockerfile = "Dockerfile-cross"
@@ -1,6 +1,6 @@
# syntax = docker/dockerfile:1.2

FROM rust:1.90-bookworm as builder
FROM rust:1.89-bookworm as builder
WORKDIR app
COPY . .

Dockerfile-cross (new file, +17)
@@ -0,0 +1,17 @@
# syntax=docker/dockerfile:1

ARG CROSS_BASE_IMAGE
FROM ${CROSS_BASE_IMAGE}
WORKDIR /app
ARG TZ=Etc/UTC \
LANG=C.UTF-8 \
LC_ALL=C.UTF-8 \
DEBIAN_FRONTEND=noninteractive
ENV CARGO_TERM_COLOR=always

COPY script/install-mold script/
RUN ./script/install-mold "2.34.0"
COPY script/remote-server script/
RUN ./script/remote-server

COPY . .
@@ -1,3 +1,9 @@
|
||||
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M13.2806 4.66818L8.26042 1.76982C8.09921 1.67673 7.9003 1.67673 7.73909 1.76982L2.71918 4.66818C2.58367 4.74642 2.5 4.89112 2.5 5.04785V10.8924C2.5 11.0489 2.58367 11.1938 2.71918 11.2721L7.73934 14.1704C7.90054 14.2635 8.09946 14.2635 8.26066 14.1704L13.2808 11.2721C13.4163 11.1938 13.5 11.0491 13.5 10.8924V5.04785C13.5 4.89136 13.4163 4.74642 13.2808 4.66818H13.2806ZM12.9653 5.28212L8.11901 13.676C8.08626 13.7326 7.99977 13.7095 7.99977 13.6439V8.14771C7.99977 8.03788 7.94107 7.9363 7.84586 7.88115L3.08613 5.13317C3.02957 5.10041 3.05266 5.0139 3.11818 5.0139H12.8106C12.9483 5.0139 13.0343 5.1631 12.9655 5.28236H12.9653V5.28212Z" fill="#C4CAD4"/>
|
||||
<path opacity="0.6" d="M3.5 11V5.5L8.5 8L3.5 11Z" fill="black"/>
|
||||
<path opacity="0.4" d="M8.5 14L3.5 11L8.5 8V14Z" fill="black"/>
|
||||
<path opacity="0.6" d="M8.5 5.5H3.5L8.5 2.5L8.5 5.5Z" fill="black"/>
|
||||
<path opacity="0.8" d="M8.5 5.5V2.5L13.5 5.5H8.5Z" fill="black"/>
|
||||
<path opacity="0.2" d="M13.5 11L8.5 14L11 9.5L13.5 11Z" fill="black"/>
|
||||
<path opacity="0.5" d="M13.5 11L11 9.5L13.5 5V11Z" fill="black"/>
|
||||
<path d="M3.5 11V5L8.5 2.11325L13.5 5V11L8.5 13.8868L3.5 11Z" stroke="black"/>
|
||||
</svg>
|
Before: 769 B | After: 583 B
@@ -1,11 +0,0 @@
|
||||
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_3010_383)">
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M3.71141 7.06133C3.76141 6.47267 3.78341 5.88133 3.81608 5.29133C4.10416 0.190201 11.896 0.190202 12.1841 5.29133C12.2174 5.898 12.2441 6.50333 12.3067 7.10733C12.6951 7.94202 14.3637 11.6214 13.4134 12.006C13.1894 12.096 12.8041 11.7227 12.3694 11.052C12.207 11.9614 11.7273 12.8132 11.0587 13.4467C11.7441 13.68 12.3334 13.998 12.3334 14.3333C12.3334 14.9176 3.66675 14.9257 3.66675 14.3333C3.66675 13.998 4.25608 13.68 4.94141 13.4467C4.26191 12.803 3.82279 11.9657 3.62408 11.056C3.19075 11.724 2.80608 12.096 2.58341 12.006C1.626 11.6185 3.31478 7.90684 3.71141 7.06133Z" stroke="#7B7B7B" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
<path d="M6.11822 6.6L7.68822 7.89C7.85822 8.03 8.12822 8.03 8.29822 7.89L9.86822 6.6C10.1382 6.38 9.94822 6 9.56822 6H6.42822C6.04822 6 5.85822 6.38 6.12822 6.6H6.11822Z" fill="#7B7B7B"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_3010_383">
|
||||
<rect width="16" height="16" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
Before: 1.1 KiB (file deleted)
@@ -1,3 +0,0 @@
|
||||
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M10.1645 4.45825L5.20344 9.52074C4.98225 9.74193 4.85798 10.0419 4.85798 10.3548C4.85798 10.6676 4.98225 10.9676 5.20344 11.1888C5.42464 11.41 5.72464 11.5342 6.03746 11.5342C6.35028 11.5342 6.65028 11.41 6.87148 11.1888L11.8326 6.12629C12.2749 5.68397 12.5234 5.08407 12.5234 4.45854C12.5234 3.83302 12.2749 3.23311 11.8326 2.7908C11.3902 2.34849 10.7903 2.1 10.1648 2.1C9.53928 2.1 8.93938 2.34849 8.49707 2.7908L3.55663 7.83265C3.22373 8.16017 2.95897 8.55037 2.77762 8.98072C2.59628 9.41108 2.50193 9.87308 2.50003 10.3401C2.49813 10.8071 2.58871 11.2698 2.76654 11.7017C2.94438 12.1335 3.20595 12.5258 3.53618 12.856C3.8664 13.1863 4.25873 13.4478 4.69055 13.6257C5.12237 13.8035 5.58513 13.8941 6.05213 13.8922C6.51913 13.8903 6.98114 13.7959 7.41149 13.6146C7.84185 13.4332 8.23204 13.1685 8.55957 12.8356L13.5 7.79373" stroke="#C4CAD4" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
</svg>
Before: 1.0 KiB (file deleted)
@@ -1,4 +1 @@
|
||||
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M6.125 9.25001L3 6.125L6.125 3" stroke="#C4CAD4" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
<path d="M3 6.125H9.56251C10.0139 6.125 10.4609 6.21391 10.878 6.38666C11.295 6.55942 11.674 6.81262 11.9932 7.13182C12.3124 7.45102 12.5656 7.82997 12.7383 8.24703C12.9111 8.66408 13 9.11108 13 9.5625C13 10.0139 12.9111 10.4609 12.7383 10.878C12.5656 11.295 12.3124 11.674 11.9932 11.9932C11.674 12.3124 11.295 12.5656 10.878 12.7383C10.4609 12.9111 10.0139 13 9.56251 13H7.375" stroke="black" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
</svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M2.6 5v3.6h3.6"/><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M13.4 11A5.4 5.4 0 0 0 8 5.6a5.4 5.4 0 0 0-3.6 1.38L2.6 8.6"/></svg>
Before: 692 B | After: 339 B
Before: 29 KiB | After: 14 KiB
@@ -16,7 +16,6 @@
|
||||
"up": "menu::SelectPrevious",
|
||||
"enter": "menu::Confirm",
|
||||
"ctrl-enter": "menu::SecondaryConfirm",
|
||||
"ctrl-escape": "menu::Cancel",
|
||||
"ctrl-c": "menu::Cancel",
|
||||
"escape": "menu::Cancel",
|
||||
"alt-shift-enter": "menu::Restart",
|
||||
@@ -31,7 +30,6 @@
|
||||
"ctrl--": ["zed::DecreaseBufferFontSize", { "persist": false }],
|
||||
"ctrl-0": ["zed::ResetBufferFontSize", { "persist": false }],
|
||||
"ctrl-,": "zed::OpenSettings",
|
||||
"ctrl-alt-,": "zed::OpenSettingsFile",
|
||||
"ctrl-q": "zed::Quit",
|
||||
"f4": "debugger::Start",
|
||||
"shift-f5": "debugger::Stop",
|
||||
@@ -65,8 +63,8 @@
|
||||
"ctrl-k": "editor::CutToEndOfLine",
|
||||
"ctrl-k ctrl-q": "editor::Rewrap",
|
||||
"ctrl-k q": "editor::Rewrap",
|
||||
"ctrl-backspace": ["editor::DeleteToPreviousWordStart", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-delete": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-backspace": "editor::DeleteToPreviousWordStart",
|
||||
"ctrl-delete": "editor::DeleteToNextWordEnd",
|
||||
"cut": "editor::Cut",
|
||||
"shift-delete": "editor::Cut",
|
||||
"ctrl-x": "editor::Cut",
|
||||
@@ -172,7 +170,6 @@
|
||||
"context": "Markdown",
|
||||
"bindings": {
|
||||
"copy": "markdown::Copy",
|
||||
"ctrl-insert": "markdown::Copy",
|
||||
"ctrl-c": "markdown::Copy"
|
||||
}
|
||||
},
|
||||
@@ -248,10 +245,7 @@
|
||||
"ctrl-shift-e": "project_panel::ToggleFocus",
|
||||
"ctrl-shift-enter": "agent::ContinueThread",
|
||||
"super-ctrl-b": "agent::ToggleBurnMode",
|
||||
"alt-enter": "agent::ContinueWithBurnMode",
|
||||
"ctrl-y": "agent::AllowOnce",
|
||||
"ctrl-alt-y": "agent::AllowAlways",
|
||||
"ctrl-alt-z": "agent::RejectOnce"
|
||||
"alt-enter": "agent::ContinueWithBurnMode"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -264,7 +258,6 @@
|
||||
"context": "AgentPanel > Markdown",
|
||||
"bindings": {
|
||||
"copy": "markdown::CopyAsMarkdown",
|
||||
"ctrl-insert": "markdown::CopyAsMarkdown",
|
||||
"ctrl-c": "markdown::CopyAsMarkdown"
|
||||
}
|
||||
},
|
||||
@@ -332,12 +325,6 @@
|
||||
"enter": "agent::AcceptSuggestedContext"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "AcpThread > ModeSelector",
|
||||
"bindings": {
|
||||
"ctrl-enter": "menu::Confirm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "AcpThread > Editor && !use_modifier_to_send",
|
||||
"use_key_equivalents": true,
|
||||
@@ -355,8 +342,7 @@
|
||||
"ctrl-enter": "agent::Chat",
|
||||
"shift-ctrl-r": "agent::OpenAgentDiff",
|
||||
"ctrl-shift-y": "agent::KeepAll",
|
||||
"ctrl-shift-n": "agent::RejectAll",
|
||||
"shift-tab": "agent::CycleModeSelector"
|
||||
"ctrl-shift-n": "agent::RejectAll"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -370,8 +356,7 @@
|
||||
"bindings": {
|
||||
"new": "rules_library::NewRule",
|
||||
"ctrl-n": "rules_library::NewRule",
|
||||
"ctrl-shift-s": "rules_library::ToggleDefaultRule",
|
||||
"ctrl-w": "workspace::CloseWindow"
|
||||
"ctrl-shift-s": "rules_library::ToggleDefaultRule"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -464,8 +449,8 @@
|
||||
"ctrl-k ctrl-w": "workspace::CloseAllItemsAndPanes",
|
||||
"back": "pane::GoBack",
|
||||
"ctrl-alt--": "pane::GoBack",
|
||||
"forward": "pane::GoForward",
|
||||
"ctrl-alt-_": "pane::GoForward",
|
||||
"forward": "pane::GoForward",
|
||||
"ctrl-alt-g": "search::SelectNextMatch",
|
||||
"f3": "search::SelectNextMatch",
|
||||
"ctrl-alt-shift-g": "search::SelectPreviousMatch",
|
||||
@@ -498,8 +483,8 @@
|
||||
"alt-down": "editor::MoveLineDown",
|
||||
"ctrl-alt-shift-up": "editor::DuplicateLineUp",
|
||||
"ctrl-alt-shift-down": "editor::DuplicateLineDown",
|
||||
"alt-shift-right": "editor::SelectLargerSyntaxNode", // Expand selection
|
||||
"alt-shift-left": "editor::SelectSmallerSyntaxNode", // Shrink selection
|
||||
"alt-shift-right": "editor::SelectLargerSyntaxNode", // Expand Selection
|
||||
"alt-shift-left": "editor::SelectSmallerSyntaxNode", // Shrink Selection
|
||||
"ctrl-shift-l": "editor::SelectAllMatches", // Select all occurrences of current selection
|
||||
"ctrl-f2": "editor::SelectAllMatches", // Select all occurrences of current word
|
||||
"ctrl-d": ["editor::SelectNext", { "replace_newest": false }], // editor.action.addSelectionToNextFindMatch / find_under_expand
|
||||
@@ -527,15 +512,15 @@
|
||||
"ctrl-k ctrl-l": "editor::ToggleFold",
|
||||
"ctrl-k ctrl-[": "editor::FoldRecursive",
|
||||
"ctrl-k ctrl-]": "editor::UnfoldRecursive",
|
||||
"ctrl-k ctrl-1": "editor::FoldAtLevel_1",
|
||||
"ctrl-k ctrl-2": "editor::FoldAtLevel_2",
|
||||
"ctrl-k ctrl-3": "editor::FoldAtLevel_3",
|
||||
"ctrl-k ctrl-4": "editor::FoldAtLevel_4",
|
||||
"ctrl-k ctrl-5": "editor::FoldAtLevel_5",
|
||||
"ctrl-k ctrl-6": "editor::FoldAtLevel_6",
|
||||
"ctrl-k ctrl-7": "editor::FoldAtLevel_7",
|
||||
"ctrl-k ctrl-8": "editor::FoldAtLevel_8",
|
||||
"ctrl-k ctrl-9": "editor::FoldAtLevel_9",
|
||||
"ctrl-k ctrl-1": ["editor::FoldAtLevel", 1],
|
||||
"ctrl-k ctrl-2": ["editor::FoldAtLevel", 2],
|
||||
"ctrl-k ctrl-3": ["editor::FoldAtLevel", 3],
|
||||
"ctrl-k ctrl-4": ["editor::FoldAtLevel", 4],
|
||||
"ctrl-k ctrl-5": ["editor::FoldAtLevel", 5],
|
||||
"ctrl-k ctrl-6": ["editor::FoldAtLevel", 6],
|
||||
"ctrl-k ctrl-7": ["editor::FoldAtLevel", 7],
|
||||
"ctrl-k ctrl-8": ["editor::FoldAtLevel", 8],
|
||||
"ctrl-k ctrl-9": ["editor::FoldAtLevel", 9],
|
||||
"ctrl-k ctrl-0": "editor::FoldAll",
|
||||
"ctrl-k ctrl-j": "editor::UnfoldAll",
|
||||
"ctrl-space": "editor::ShowCompletions",
|
||||
@@ -595,7 +580,7 @@
|
||||
"ctrl-n": "workspace::NewFile",
|
||||
"shift-new": "workspace::NewWindow",
|
||||
"ctrl-shift-n": "workspace::NewWindow",
|
||||
"ctrl-`": "terminal_panel::Toggle",
|
||||
"ctrl-`": "terminal_panel::ToggleFocus",
|
||||
"f10": ["app_menu::OpenApplicationMenu", "Zed"],
|
||||
"alt-1": ["workspace::ActivatePane", 0],
|
||||
"alt-2": ["workspace::ActivatePane", 1],
|
||||
@@ -621,7 +606,7 @@
|
||||
"ctrl-shift-f": "pane::DeploySearch",
|
||||
"ctrl-shift-h": ["pane::DeploySearch", { "replace_enabled": true }],
|
||||
"ctrl-shift-t": "pane::ReopenClosedItem",
|
||||
"ctrl-k ctrl-s": "zed::OpenKeymap",
|
||||
"ctrl-k ctrl-s": "zed::OpenKeymapEditor",
|
||||
"ctrl-k ctrl-t": "theme_selector::Toggle",
|
||||
"ctrl-alt-super-p": "settings_profile_selector::Toggle",
|
||||
"ctrl-t": "project_symbols::Toggle",
|
||||
@@ -640,7 +625,6 @@
|
||||
"alt-save": "workspace::SaveAll",
|
||||
"ctrl-alt-s": "workspace::SaveAll",
|
||||
"ctrl-k m": "language_selector::Toggle",
|
||||
"ctrl-k ctrl-m": "toolchain::AddToolchain",
|
||||
"escape": "workspace::Unfollow",
|
||||
"ctrl-k ctrl-left": "workspace::ActivatePaneLeft",
|
||||
"ctrl-k ctrl-right": "workspace::ActivatePaneRight",
|
||||
@@ -651,9 +635,7 @@
|
||||
"ctrl-k shift-up": "workspace::SwapPaneUp",
|
||||
"ctrl-k shift-down": "workspace::SwapPaneDown",
|
||||
"ctrl-shift-x": "zed::Extensions",
|
||||
// All task parameters are captured and unchanged between reruns by default.
|
||||
// Use the `"reevaluate_context"` parameter to control this.
|
||||
"ctrl-shift-r": ["task::Rerun", { "reevaluate_context": false }],
|
||||
"ctrl-shift-r": "task::Rerun",
|
||||
"ctrl-alt-r": "task::Rerun",
|
||||
"alt-t": "task::Rerun",
|
||||
"alt-shift-t": "task::Spawn",
|
||||
@@ -1043,13 +1025,6 @@
|
||||
"tab": "channel_modal::ToggleMode"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "ToolchainSelector",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-shift-a": "toolchain::AddToolchain"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "FileFinder || (FileFinder > Picker > Editor)",
|
||||
"bindings": {
|
||||
@@ -1077,12 +1052,6 @@
|
||||
"ctrl-backspace": "tab_switcher::CloseSelectedItem"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "StashList || (StashList > Picker > Editor)",
|
||||
"bindings": {
|
||||
"ctrl-shift-backspace": "stash_picker::DropStashItem"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "Terminal",
|
||||
"bindings": {
|
||||
@@ -1142,13 +1111,6 @@
|
||||
"ctrl-enter": "menu::Confirm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "ContextServerToolsModal",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"escape": "menu::Cancel"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "OnboardingAiConfigurationModal",
|
||||
"use_key_equivalents": true,
|
||||
@@ -1229,6 +1191,9 @@
|
||||
"context": "Onboarding",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-1": "onboarding::ActivateBasicsPage",
|
||||
"ctrl-2": "onboarding::ActivateEditingPage",
|
||||
"ctrl-3": "onboarding::ActivateAISetupPage",
|
||||
"ctrl-enter": "onboarding::Finish",
|
||||
"alt-shift-l": "onboarding::SignIn",
|
||||
"alt-shift-a": "onboarding::OpenAccount"
|
||||
@@ -1240,44 +1205,5 @@
|
||||
"bindings": {
|
||||
"ctrl-shift-enter": "workspace::OpenWithSystem"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "SettingsWindow",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-w": "workspace::CloseWindow",
|
||||
"escape": "workspace::CloseWindow",
|
||||
"ctrl-m": "settings_editor::Minimize",
|
||||
"ctrl-f": "search::FocusSearch",
|
||||
"left": "settings_editor::ToggleFocusNav",
|
||||
"ctrl-shift-e": "settings_editor::ToggleFocusNav",
|
||||
// todo(settings_ui): cut this down based on the max files and overflow UI
|
||||
"ctrl-1": ["settings_editor::FocusFile", 0],
|
||||
"ctrl-2": ["settings_editor::FocusFile", 1],
|
||||
"ctrl-3": ["settings_editor::FocusFile", 2],
|
||||
"ctrl-4": ["settings_editor::FocusFile", 3],
|
||||
"ctrl-5": ["settings_editor::FocusFile", 4],
|
||||
"ctrl-6": ["settings_editor::FocusFile", 5],
|
||||
"ctrl-7": ["settings_editor::FocusFile", 6],
|
||||
"ctrl-8": ["settings_editor::FocusFile", 7],
|
||||
"ctrl-9": ["settings_editor::FocusFile", 8],
|
||||
"ctrl-0": ["settings_editor::FocusFile", 9],
|
||||
"ctrl-pageup": "settings_editor::FocusPreviousFile",
|
||||
"ctrl-pagedown": "settings_editor::FocusNextFile"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "SettingsWindow > NavigationMenu",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"up": "settings_editor::FocusPreviousNavEntry",
|
||||
"down": "settings_editor::FocusNextNavEntry",
|
||||
"right": "settings_editor::ExpandNavEntry",
|
||||
"left": "settings_editor::CollapseNavEntry",
|
||||
"pageup": "settings_editor::FocusPreviousRootNavEntry",
|
||||
"pagedown": "settings_editor::FocusNextRootNavEntry",
|
||||
"home": "settings_editor::FocusFirstNavEntry",
|
||||
"end": "settings_editor::FocusLastNavEntry"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@@ -40,7 +40,6 @@
|
||||
"cmd--": ["zed::DecreaseBufferFontSize", { "persist": false }],
|
||||
"cmd-0": ["zed::ResetBufferFontSize", { "persist": false }],
|
||||
"cmd-,": "zed::OpenSettings",
|
||||
"cmd-alt-,": "zed::OpenSettingsFile",
|
||||
"cmd-q": "zed::Quit",
|
||||
"cmd-h": "zed::Hide",
|
||||
"alt-cmd-h": "zed::HideOthers",
|
||||
@@ -71,9 +70,9 @@
|
||||
"cmd-k q": "editor::Rewrap",
|
||||
"cmd-backspace": "editor::DeleteToBeginningOfLine",
|
||||
"cmd-delete": "editor::DeleteToEndOfLine",
|
||||
"alt-backspace": ["editor::DeleteToPreviousWordStart", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-w": ["editor::DeleteToPreviousWordStart", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"alt-delete": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"alt-backspace": "editor::DeleteToPreviousWordStart",
|
||||
"ctrl-w": "editor::DeleteToPreviousWordStart",
|
||||
"alt-delete": "editor::DeleteToNextWordEnd",
|
||||
"cmd-x": "editor::Cut",
|
||||
"cmd-c": "editor::Copy",
|
||||
"cmd-v": "editor::Paste",
|
||||
@@ -219,7 +218,7 @@
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "Editor && !agent_diff && !AgentPanel",
|
||||
"context": "Editor && !agent_diff",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"cmd-alt-z": "git::Restore",
|
||||
@@ -287,10 +286,7 @@
|
||||
"cmd-shift-e": "project_panel::ToggleFocus",
|
||||
"cmd-ctrl-b": "agent::ToggleBurnMode",
|
||||
"cmd-shift-enter": "agent::ContinueThread",
|
||||
"alt-enter": "agent::ContinueWithBurnMode",
|
||||
"cmd-y": "agent::AllowOnce",
|
||||
"cmd-alt-y": "agent::AllowAlways",
|
||||
"cmd-alt-z": "agent::RejectOnce"
|
||||
"alt-enter": "agent::ContinueWithBurnMode"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -382,12 +378,6 @@
|
||||
"ctrl--": "pane::GoBack"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "AcpThread > ModeSelector",
|
||||
"bindings": {
|
||||
"cmd-enter": "menu::Confirm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "AcpThread > Editor && !use_modifier_to_send",
|
||||
"use_key_equivalents": true,
|
||||
@@ -395,8 +385,7 @@
|
||||
"enter": "agent::Chat",
|
||||
"shift-ctrl-r": "agent::OpenAgentDiff",
|
||||
"cmd-shift-y": "agent::KeepAll",
|
||||
"cmd-shift-n": "agent::RejectAll",
|
||||
"shift-tab": "agent::CycleModeSelector"
|
||||
"cmd-shift-n": "agent::RejectAll"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -406,8 +395,7 @@
|
||||
"cmd-enter": "agent::Chat",
|
||||
"shift-ctrl-r": "agent::OpenAgentDiff",
|
||||
"cmd-shift-y": "agent::KeepAll",
|
||||
"cmd-shift-n": "agent::RejectAll",
|
||||
"shift-tab": "agent::CycleModeSelector"
|
||||
"cmd-shift-n": "agent::RejectAll"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -548,12 +536,8 @@
|
||||
"alt-down": "editor::MoveLineDown",
|
||||
"alt-shift-up": "editor::DuplicateLineUp",
|
||||
"alt-shift-down": "editor::DuplicateLineDown",
|
||||
"cmd-ctrl-left": "editor::SelectSmallerSyntaxNode", // Shrink selection
|
||||
"cmd-ctrl-right": "editor::SelectLargerSyntaxNode", // Expand selection
|
||||
"cmd-ctrl-up": "editor::SelectPreviousSyntaxNode", // Move selection up
|
||||
"ctrl-shift-right": "editor::SelectLargerSyntaxNode", // Expand selection (VSCode version)
|
||||
"ctrl-shift-left": "editor::SelectSmallerSyntaxNode", // Shrink selection (VSCode version)
|
||||
"cmd-ctrl-down": "editor::SelectNextSyntaxNode", // Move selection down
|
||||
"ctrl-shift-right": "editor::SelectLargerSyntaxNode", // Expand Selection
|
||||
"ctrl-shift-left": "editor::SelectSmallerSyntaxNode", // Shrink Selection
|
||||
"cmd-d": ["editor::SelectNext", { "replace_newest": false }], // editor.action.addSelectionToNextFindMatch / find_under_expand
|
||||
"cmd-shift-l": "editor::SelectAllMatches", // Select all occurrences of current selection
|
||||
"cmd-f2": "editor::SelectAllMatches", // Select all occurrences of current word
|
||||
@@ -582,15 +566,15 @@
|
||||
"cmd-k cmd-l": "editor::ToggleFold",
|
||||
"cmd-k cmd-[": "editor::FoldRecursive",
|
||||
"cmd-k cmd-]": "editor::UnfoldRecursive",
|
||||
"cmd-k cmd-1": "editor::FoldAtLevel_1",
|
||||
"cmd-k cmd-2": "editor::FoldAtLevel_2",
|
||||
"cmd-k cmd-3": "editor::FoldAtLevel_3",
|
||||
"cmd-k cmd-4": "editor::FoldAtLevel_4",
|
||||
"cmd-k cmd-5": "editor::FoldAtLevel_5",
|
||||
"cmd-k cmd-6": "editor::FoldAtLevel_6",
|
||||
"cmd-k cmd-7": "editor::FoldAtLevel_7",
|
||||
"cmd-k cmd-8": "editor::FoldAtLevel_8",
|
||||
"cmd-k cmd-9": "editor::FoldAtLevel_9",
|
||||
"cmd-k cmd-1": ["editor::FoldAtLevel", 1],
|
||||
"cmd-k cmd-2": ["editor::FoldAtLevel", 2],
|
||||
"cmd-k cmd-3": ["editor::FoldAtLevel", 3],
|
||||
"cmd-k cmd-4": ["editor::FoldAtLevel", 4],
|
||||
"cmd-k cmd-5": ["editor::FoldAtLevel", 5],
|
||||
"cmd-k cmd-6": ["editor::FoldAtLevel", 6],
|
||||
"cmd-k cmd-7": ["editor::FoldAtLevel", 7],
|
||||
"cmd-k cmd-8": ["editor::FoldAtLevel", 8],
|
||||
"cmd-k cmd-9": ["editor::FoldAtLevel", 9],
|
||||
"cmd-k cmd-0": "editor::FoldAll",
|
||||
"cmd-k cmd-j": "editor::UnfoldAll",
|
||||
// Using `ctrl-space` / `ctrl-shift-space` in Zed requires disabling the macOS global shortcut.
|
||||
@@ -665,7 +649,7 @@
|
||||
"alt-shift-enter": "toast::RunAction",
|
||||
"cmd-shift-s": "workspace::SaveAs",
|
||||
"cmd-shift-n": "workspace::NewWindow",
|
||||
"ctrl-`": "terminal_panel::Toggle",
|
||||
"ctrl-`": "terminal_panel::ToggleFocus",
|
||||
"cmd-1": ["workspace::ActivatePane", 0],
|
||||
"cmd-2": ["workspace::ActivatePane", 1],
|
||||
"cmd-3": ["workspace::ActivatePane", 2],
|
||||
@@ -690,7 +674,7 @@
|
||||
"cmd-shift-f": "pane::DeploySearch",
|
||||
"cmd-shift-h": ["pane::DeploySearch", { "replace_enabled": true }],
|
||||
"cmd-shift-t": "pane::ReopenClosedItem",
|
||||
"cmd-k cmd-s": "zed::OpenKeymap",
|
||||
"cmd-k cmd-s": "zed::OpenKeymapEditor",
|
||||
"cmd-k cmd-t": "theme_selector::Toggle",
|
||||
"ctrl-alt-cmd-p": "settings_profile_selector::Toggle",
|
||||
"cmd-t": "project_symbols::Toggle",
|
||||
@@ -706,7 +690,6 @@
|
||||
"cmd-?": "agent::ToggleFocus",
|
||||
"cmd-alt-s": "workspace::SaveAll",
|
||||
"cmd-k m": "language_selector::Toggle",
|
||||
"cmd-k cmd-m": "toolchain::AddToolchain",
|
||||
"escape": "workspace::Unfollow",
|
||||
"cmd-k cmd-left": "workspace::ActivatePaneLeft",
|
||||
"cmd-k cmd-right": "workspace::ActivatePaneRight",
|
||||
@@ -727,9 +710,7 @@
|
||||
"bindings": {
|
||||
"cmd-n": "workspace::NewFile",
|
||||
"cmd-shift-r": "task::Spawn",
|
||||
// All task parameters are captured and unchanged between reruns by default.
|
||||
// Use the `"reevaluate_context"` parameter to control this.
|
||||
"cmd-alt-r": ["task::Rerun", { "reevaluate_context": false }],
|
||||
"cmd-alt-r": "task::Rerun",
|
||||
"ctrl-alt-shift-r": ["task::Spawn", { "reveal_target": "center" }]
|
||||
// also possible to spawn tasks by name:
|
||||
// "foo-bar": ["task::Spawn", { "task_name": "MyTask", "reveal_target": "dock" }]
|
||||
@@ -1113,13 +1094,6 @@
|
||||
"tab": "channel_modal::ToggleMode"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "ToolchainSelector",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"cmd-shift-a": "toolchain::AddToolchain"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "FileFinder || (FileFinder > Picker > Editor)",
|
||||
"use_key_equivalents": true,
|
||||
@@ -1149,13 +1123,6 @@
|
||||
"ctrl-backspace": "tab_switcher::CloseSelectedItem"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "StashList || (StashList > Picker > Editor)",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-shift-backspace": "stash_picker::DropStashItem"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "Terminal",
|
||||
"use_key_equivalents": true,
|
||||
@@ -1247,13 +1214,6 @@
|
||||
"cmd-enter": "menu::Confirm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "ContextServerToolsModal",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"escape": "menu::Cancel"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "OnboardingAiConfigurationModal",
|
||||
"use_key_equivalents": true,
|
||||
@@ -1334,7 +1294,10 @@
|
||||
"context": "Onboarding",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"cmd-enter": "onboarding::Finish",
|
||||
"cmd-1": "onboarding::ActivateBasicsPage",
|
||||
"cmd-2": "onboarding::ActivateEditingPage",
|
||||
"cmd-3": "onboarding::ActivateAISetupPage",
|
||||
"cmd-escape": "onboarding::Finish",
|
||||
"alt-tab": "onboarding::SignIn",
|
||||
"alt-shift-a": "onboarding::OpenAccount"
|
||||
}
|
||||
@@ -1345,44 +1308,5 @@
|
||||
"bindings": {
|
||||
"ctrl-shift-enter": "workspace::OpenWithSystem"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "SettingsWindow",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"cmd-w": "workspace::CloseWindow",
|
||||
"escape": "workspace::CloseWindow",
|
||||
"cmd-m": "settings_editor::Minimize",
|
||||
"cmd-f": "search::FocusSearch",
|
||||
"left": "settings_editor::ToggleFocusNav",
|
||||
"cmd-shift-e": "settings_editor::ToggleFocusNav",
|
||||
// todo(settings_ui): cut this down based on the max files and overflow UI
|
||||
"ctrl-1": ["settings_editor::FocusFile", 0],
|
||||
"ctrl-2": ["settings_editor::FocusFile", 1],
|
||||
"ctrl-3": ["settings_editor::FocusFile", 2],
|
||||
"ctrl-4": ["settings_editor::FocusFile", 3],
|
||||
"ctrl-5": ["settings_editor::FocusFile", 4],
|
||||
"ctrl-6": ["settings_editor::FocusFile", 5],
|
||||
"ctrl-7": ["settings_editor::FocusFile", 6],
|
||||
"ctrl-8": ["settings_editor::FocusFile", 7],
|
||||
"ctrl-9": ["settings_editor::FocusFile", 8],
|
||||
"ctrl-0": ["settings_editor::FocusFile", 9],
|
||||
"cmd-{": "settings_editor::FocusPreviousFile",
|
||||
"cmd-}": "settings_editor::FocusNextFile"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "SettingsWindow > NavigationMenu",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"up": "settings_editor::FocusPreviousNavEntry",
|
||||
"down": "settings_editor::FocusNextNavEntry",
|
||||
"right": "settings_editor::ExpandNavEntry",
|
||||
"left": "settings_editor::CollapseNavEntry",
|
||||
"pageup": "settings_editor::FocusPreviousRootNavEntry",
|
||||
"pagedown": "settings_editor::FocusNextRootNavEntry",
|
||||
"home": "settings_editor::FocusFirstNavEntry",
|
||||
"end": "settings_editor::FocusLastNavEntry"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
"up": "menu::SelectPrevious",
|
||||
"enter": "menu::Confirm",
|
||||
"ctrl-enter": "menu::SecondaryConfirm",
|
||||
"ctrl-escape": "menu::Cancel",
|
||||
"ctrl-c": "menu::Cancel",
|
||||
"escape": "menu::Cancel",
|
||||
"shift-alt-enter": "menu::Restart",
|
||||
@@ -24,13 +25,13 @@
|
||||
"ctrl-alt-enter": ["picker::ConfirmInput", { "secondary": true }],
|
||||
"ctrl-shift-w": "workspace::CloseWindow",
|
||||
"shift-escape": "workspace::ToggleZoom",
|
||||
"open": "workspace::Open",
|
||||
"ctrl-o": "workspace::Open",
|
||||
"ctrl-=": ["zed::IncreaseBufferFontSize", { "persist": false }],
|
||||
"ctrl-shift-=": ["zed::IncreaseBufferFontSize", { "persist": false }],
|
||||
"ctrl--": ["zed::DecreaseBufferFontSize", { "persist": false }],
|
||||
"ctrl-0": ["zed::ResetBufferFontSize", { "persist": false }],
|
||||
"ctrl-,": "zed::OpenSettings",
|
||||
"ctrl-alt-,": "zed::OpenSettingsFile",
|
||||
"ctrl-q": "zed::Quit",
|
||||
"f4": "debugger::Start",
|
||||
"shift-f5": "debugger::Stop",
|
||||
@@ -65,15 +66,20 @@
|
||||
"ctrl-k": "editor::CutToEndOfLine",
|
||||
"ctrl-k ctrl-q": "editor::Rewrap",
|
||||
"ctrl-k q": "editor::Rewrap",
|
||||
"ctrl-backspace": ["editor::DeleteToPreviousWordStart", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-delete": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-backspace": "editor::DeleteToPreviousWordStart",
|
||||
"ctrl-delete": "editor::DeleteToNextWordEnd",
|
||||
"cut": "editor::Cut",
|
||||
"shift-delete": "editor::Cut",
|
||||
"ctrl-x": "editor::Cut",
|
||||
"copy": "editor::Copy",
|
||||
"ctrl-insert": "editor::Copy",
|
||||
"ctrl-c": "editor::Copy",
|
||||
"paste": "editor::Paste",
|
||||
"shift-insert": "editor::Paste",
|
||||
"ctrl-v": "editor::Paste",
|
||||
"undo": "editor::Undo",
|
||||
"ctrl-z": "editor::Undo",
|
||||
"redo": "editor::Redo",
|
||||
"ctrl-y": "editor::Redo",
|
||||
"ctrl-shift-z": "editor::Redo",
|
||||
"up": "editor::MoveUp",
|
||||
@@ -132,9 +138,10 @@
|
||||
"ctrl-shift-enter": "editor::NewlineAbove",
|
||||
"ctrl-k ctrl-z": "editor::ToggleSoftWrap",
|
||||
"ctrl-k z": "editor::ToggleSoftWrap",
|
||||
"find": "buffer_search::Deploy",
|
||||
"ctrl-f": "buffer_search::Deploy",
|
||||
"ctrl-h": "buffer_search::DeployReplace",
|
||||
"ctrl-shift-.": "agent::QuoteSelection",
|
||||
"ctrl-shift-.": "assistant::QuoteSelection",
|
||||
"ctrl-shift-,": "assistant::InsertIntoEditor",
|
||||
"shift-alt-e": "editor::SelectEnclosingSymbol",
|
||||
"ctrl-shift-backspace": "editor::GoToPreviousChange",
|
||||
@@ -170,6 +177,7 @@
|
||||
"context": "Markdown",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"copy": "markdown::Copy",
|
||||
"ctrl-c": "markdown::Copy"
|
||||
}
|
||||
},
|
||||
@@ -217,6 +225,7 @@
|
||||
"bindings": {
|
||||
"ctrl-enter": "assistant::Assist",
|
||||
"ctrl-s": "workspace::Save",
|
||||
"save": "workspace::Save",
|
||||
"ctrl-shift-,": "assistant::InsertIntoEditor",
|
||||
"shift-enter": "assistant::Split",
|
||||
"ctrl-r": "assistant::CycleMessageRole",
|
||||
@@ -244,15 +253,12 @@
|
||||
"ctrl-shift-i": "agent::ToggleOptionsMenu",
|
||||
// "ctrl-shift-alt-n": "agent::ToggleNewThreadMenu",
|
||||
"shift-alt-escape": "agent::ExpandMessageEditor",
|
||||
"ctrl-shift-.": "agent::QuoteSelection",
|
||||
"ctrl-shift-.": "assistant::QuoteSelection",
|
||||
"shift-alt-e": "agent::RemoveAllContext",
|
||||
"ctrl-shift-e": "project_panel::ToggleFocus",
|
||||
"ctrl-shift-enter": "agent::ContinueThread",
|
||||
"super-ctrl-b": "agent::ToggleBurnMode",
|
||||
"alt-enter": "agent::ContinueWithBurnMode",
|
||||
"ctrl-y": "agent::AllowOnce",
|
||||
"ctrl-alt-y": "agent::AllowAlways",
|
||||
"ctrl-alt-z": "agent::RejectOnce"
|
||||
"alt-enter": "agent::ContinueWithBurnMode"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -266,6 +272,7 @@
|
||||
"context": "AgentPanel > Markdown",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"copy": "markdown::CopyAsMarkdown",
|
||||
"ctrl-c": "markdown::CopyAsMarkdown"
|
||||
}
|
||||
},
|
||||
@@ -340,31 +347,13 @@
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "AcpThread > ModeSelector",
|
||||
"bindings": {
|
||||
"ctrl-enter": "menu::Confirm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "AcpThread > Editor && !use_modifier_to_send",
|
||||
"context": "AcpThread > Editor",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"enter": "agent::Chat",
|
||||
"ctrl-shift-r": "agent::OpenAgentDiff",
|
||||
"ctrl-shift-y": "agent::KeepAll",
|
||||
"ctrl-shift-n": "agent::RejectAll",
|
||||
"shift-tab": "agent::CycleModeSelector"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "AcpThread > Editor && use_modifier_to_send",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-enter": "agent::Chat",
|
||||
"ctrl-shift-r": "agent::OpenAgentDiff",
|
||||
"ctrl-shift-y": "agent::KeepAll",
|
||||
"ctrl-shift-n": "agent::RejectAll",
|
||||
"shift-tab": "agent::CycleModeSelector"
|
||||
"ctrl-shift-n": "agent::RejectAll"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -378,9 +367,9 @@
|
||||
"context": "PromptLibrary",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"new": "rules_library::NewRule",
|
||||
"ctrl-n": "rules_library::NewRule",
|
||||
"ctrl-shift-s": "rules_library::ToggleDefaultRule",
|
||||
"ctrl-w": "workspace::CloseWindow"
|
||||
"ctrl-shift-s": "rules_library::ToggleDefaultRule"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -392,6 +381,7 @@
|
||||
"enter": "search::SelectNextMatch",
|
||||
"shift-enter": "search::SelectPreviousMatch",
|
||||
"alt-enter": "search::SelectAllMatches",
|
||||
"find": "search::FocusSearch",
|
||||
"ctrl-f": "search::FocusSearch",
|
||||
"ctrl-h": "search::ToggleReplace",
|
||||
"ctrl-l": "search::ToggleSelection"
|
||||
@@ -418,6 +408,7 @@
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"escape": "project_search::ToggleFocus",
|
||||
"shift-find": "search::FocusSearch",
|
||||
"ctrl-shift-f": "search::FocusSearch",
|
||||
"ctrl-shift-h": "search::ToggleReplace",
|
||||
"alt-r": "search::ToggleRegex" // vscode
|
||||
@@ -477,16 +468,18 @@
|
||||
"ctrl-k ctrl-w": "workspace::CloseAllItemsAndPanes",
|
||||
"back": "pane::GoBack",
|
||||
"alt--": "pane::GoBack",
|
||||
"forward": "pane::GoForward",
|
||||
"alt-=": "pane::GoForward",
|
||||
"forward": "pane::GoForward",
|
||||
"f3": "search::SelectNextMatch",
|
||||
"shift-f3": "search::SelectPreviousMatch",
|
||||
"shift-find": "project_search::ToggleFocus",
|
||||
"ctrl-shift-f": "project_search::ToggleFocus",
|
||||
"shift-alt-h": "search::ToggleReplace",
|
||||
"alt-l": "search::ToggleSelection",
|
||||
"alt-enter": "search::SelectAllMatches",
|
||||
"alt-c": "search::ToggleCaseSensitive",
|
||||
"alt-w": "search::ToggleWholeWord",
|
||||
"alt-find": "project_search::ToggleFilters",
|
||||
"alt-f": "project_search::ToggleFilters",
|
||||
"alt-r": "search::ToggleRegex",
|
||||
// "ctrl-shift-alt-x": "search::ToggleRegex",
|
||||
@@ -507,8 +500,8 @@
|
||||
"alt-down": "editor::MoveLineDown",
|
||||
"shift-alt-up": "editor::DuplicateLineUp",
|
||||
"shift-alt-down": "editor::DuplicateLineDown",
|
||||
"shift-alt-right": "editor::SelectLargerSyntaxNode", // Expand selection
|
||||
"shift-alt-left": "editor::SelectSmallerSyntaxNode", // Shrink selection
|
||||
"shift-alt-right": "editor::SelectLargerSyntaxNode", // Expand Selection
|
||||
"shift-alt-left": "editor::SelectSmallerSyntaxNode", // Shrink Selection
|
||||
"ctrl-shift-l": "editor::SelectAllMatches", // Select all occurrences of current selection
|
||||
"ctrl-f2": "editor::SelectAllMatches", // Select all occurrences of current word
|
||||
"ctrl-d": ["editor::SelectNext", { "replace_newest": false }], // editor.action.addSelectionToNextFindMatch / find_under_expand
|
||||
@@ -536,15 +529,15 @@
|
||||
"ctrl-k ctrl-l": "editor::ToggleFold",
|
||||
"ctrl-k ctrl-[": "editor::FoldRecursive",
|
||||
"ctrl-k ctrl-]": "editor::UnfoldRecursive",
|
||||
"ctrl-k ctrl-1": "editor::FoldAtLevel_1",
|
||||
"ctrl-k ctrl-2": "editor::FoldAtLevel_2",
|
||||
"ctrl-k ctrl-3": "editor::FoldAtLevel_3",
|
||||
"ctrl-k ctrl-4": "editor::FoldAtLevel_4",
|
||||
"ctrl-k ctrl-5": "editor::FoldAtLevel_5",
|
||||
"ctrl-k ctrl-6": "editor::FoldAtLevel_6",
|
||||
"ctrl-k ctrl-7": "editor::FoldAtLevel_7",
|
||||
"ctrl-k ctrl-8": "editor::FoldAtLevel_8",
|
||||
"ctrl-k ctrl-9": "editor::FoldAtLevel_9",
|
||||
"ctrl-k ctrl-1": ["editor::FoldAtLevel", 1],
|
||||
"ctrl-k ctrl-2": ["editor::FoldAtLevel", 2],
|
||||
"ctrl-k ctrl-3": ["editor::FoldAtLevel", 3],
|
||||
"ctrl-k ctrl-4": ["editor::FoldAtLevel", 4],
|
||||
"ctrl-k ctrl-5": ["editor::FoldAtLevel", 5],
|
||||
"ctrl-k ctrl-6": ["editor::FoldAtLevel", 6],
|
||||
"ctrl-k ctrl-7": ["editor::FoldAtLevel", 7],
|
||||
"ctrl-k ctrl-8": ["editor::FoldAtLevel", 8],
|
||||
"ctrl-k ctrl-9": ["editor::FoldAtLevel", 9],
|
||||
"ctrl-k ctrl-0": "editor::FoldAll",
|
||||
"ctrl-k ctrl-j": "editor::UnfoldAll",
|
||||
"ctrl-space": "editor::ShowCompletions",
|
||||
@@ -586,21 +579,27 @@
|
||||
"context": "Workspace",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"alt-open": ["projects::OpenRecent", { "create_new_window": false }],
|
||||
// Change the default action on `menu::Confirm` by setting the parameter
|
||||
// "ctrl-alt-o": ["projects::OpenRecent", { "create_new_window": true }],
|
||||
"ctrl-r": ["projects::OpenRecent", { "create_new_window": false }],
|
||||
"shift-alt-open": ["projects::OpenRemote", { "from_existing_connection": false, "create_new_window": false }],
|
||||
// Change to open path modal for existing remote connection by setting the parameter
|
||||
// "ctrl-shift-alt-o": "["projects::OpenRemote", { "from_existing_connection": true }]",
|
||||
"ctrl-shift-alt-o": ["projects::OpenRemote", { "from_existing_connection": false, "create_new_window": false }],
|
||||
"shift-alt-b": "branches::OpenRecent",
|
||||
"shift-alt-enter": "toast::RunAction",
|
||||
"ctrl-shift-`": "workspace::NewTerminal",
|
||||
"save": "workspace::Save",
|
||||
"ctrl-s": "workspace::Save",
|
||||
"ctrl-k ctrl-shift-s": "workspace::SaveWithoutFormat",
|
||||
"shift-save": "workspace::SaveAs",
|
||||
"ctrl-shift-s": "workspace::SaveAs",
|
||||
"new": "workspace::NewFile",
|
||||
"ctrl-n": "workspace::NewFile",
|
||||
"shift-new": "workspace::NewWindow",
|
||||
"ctrl-shift-n": "workspace::NewWindow",
|
||||
"ctrl-`": "terminal_panel::Toggle",
|
||||
"ctrl-`": "terminal_panel::ToggleFocus",
|
||||
"f10": ["app_menu::OpenApplicationMenu", "Zed"],
|
||||
"alt-1": ["workspace::ActivatePane", 0],
|
||||
"alt-2": ["workspace::ActivatePane", 1],
|
||||
@@ -620,10 +619,13 @@
|
||||
"shift-alt--": ["workspace::DecreaseActiveDockSize", { "px": 0 }],
|
||||
"shift-alt-=": ["workspace::IncreaseActiveDockSize", { "px": 0 }],
|
||||
"shift-alt-0": "workspace::ResetOpenDocksSize",
|
||||
"ctrl-shift-alt--": ["workspace::DecreaseOpenDocksSize", { "px": 0 }],
|
||||
"ctrl-shift-alt-=": ["workspace::IncreaseOpenDocksSize", { "px": 0 }],
|
||||
"shift-find": "pane::DeploySearch",
|
||||
"ctrl-shift-f": "pane::DeploySearch",
|
||||
"ctrl-shift-h": ["pane::DeploySearch", { "replace_enabled": true }],
|
||||
"ctrl-shift-t": "pane::ReopenClosedItem",
|
||||
"ctrl-k ctrl-s": "zed::OpenKeymap",
|
||||
"ctrl-k ctrl-s": "zed::OpenKeymapEditor",
|
||||
"ctrl-k ctrl-t": "theme_selector::Toggle",
|
||||
"ctrl-alt-super-p": "settings_profile_selector::Toggle",
|
||||
"ctrl-t": "project_symbols::Toggle",
|
||||
@@ -639,9 +641,9 @@
|
||||
"ctrl-shift-g": "git_panel::ToggleFocus",
|
||||
"ctrl-shift-d": "debug_panel::ToggleFocus",
|
||||
"ctrl-shift-/": "agent::ToggleFocus",
|
||||
"alt-save": "workspace::SaveAll",
|
||||
"ctrl-k s": "workspace::SaveAll",
|
||||
"ctrl-k m": "language_selector::Toggle",
|
||||
"ctrl-m ctrl-m": "toolchain::AddToolchain",
|
||||
"escape": "workspace::Unfollow",
|
||||
"ctrl-k ctrl-left": "workspace::ActivatePaneLeft",
|
||||
"ctrl-k ctrl-right": "workspace::ActivatePaneRight",
|
||||
@@ -652,9 +654,7 @@
|
||||
"ctrl-k shift-up": "workspace::SwapPaneUp",
|
||||
"ctrl-k shift-down": "workspace::SwapPaneDown",
|
||||
"ctrl-shift-x": "zed::Extensions",
|
||||
// All task parameters are captured and unchanged between reruns by default.
|
||||
// Use the `"reevaluate_context"` parameter to control this.
|
||||
"ctrl-shift-r": ["task::Rerun", { "reevaluate_context": false }],
|
||||
"ctrl-shift-r": "task::Rerun",
|
||||
"alt-t": "task::Rerun",
|
||||
"shift-alt-t": "task::Spawn",
|
||||
"shift-alt-r": ["task::Spawn", { "reveal_target": "center" }],
|
||||
@@ -848,7 +848,9 @@
|
||||
"bindings": {
|
||||
"left": "outline_panel::CollapseSelectedEntry",
|
||||
"right": "outline_panel::ExpandSelectedEntry",
|
||||
"alt-copy": "outline_panel::CopyPath",
|
||||
"shift-alt-c": "outline_panel::CopyPath",
|
||||
"shift-alt-copy": "workspace::CopyRelativePath",
|
||||
"ctrl-shift-alt-c": "workspace::CopyRelativePath",
|
||||
"ctrl-alt-r": "outline_panel::RevealInFileManager",
|
||||
"space": "outline_panel::OpenSelectedEntry",
|
||||
@@ -864,14 +866,21 @@
|
||||
"bindings": {
|
||||
"left": "project_panel::CollapseSelectedEntry",
|
||||
"right": "project_panel::ExpandSelectedEntry",
|
||||
"new": "project_panel::NewFile",
|
||||
"ctrl-n": "project_panel::NewFile",
|
||||
"alt-new": "project_panel::NewDirectory",
|
||||
"alt-n": "project_panel::NewDirectory",
|
||||
"cut": "project_panel::Cut",
|
||||
"ctrl-x": "project_panel::Cut",
|
||||
"copy": "project_panel::Copy",
|
||||
"ctrl-insert": "project_panel::Copy",
|
||||
"ctrl-c": "project_panel::Copy",
|
||||
"paste": "project_panel::Paste",
|
||||
"shift-insert": "project_panel::Paste",
|
||||
"ctrl-v": "project_panel::Paste",
|
||||
"alt-copy": "project_panel::CopyPath",
|
||||
"shift-alt-c": "project_panel::CopyPath",
|
||||
"shift-alt-copy": "workspace::CopyRelativePath",
|
||||
"ctrl-k ctrl-shift-c": "workspace::CopyRelativePath",
|
||||
"enter": "project_panel::Rename",
|
||||
"f2": "project_panel::Rename",
|
||||
@@ -883,6 +892,7 @@
|
||||
"ctrl-alt-r": "project_panel::RevealInFileManager",
|
||||
"ctrl-shift-enter": "project_panel::OpenWithSystem",
|
||||
"alt-d": "project_panel::CompareMarkedFiles",
|
||||
"shift-find": "project_panel::NewSearchInDirectory",
|
||||
"ctrl-k ctrl-shift-f": "project_panel::NewSearchInDirectory",
|
||||
"shift-down": "menu::SelectNext",
|
||||
"shift-up": "menu::SelectPrevious",
|
||||
@@ -1065,13 +1075,6 @@
|
||||
"tab": "channel_modal::ToggleMode"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "ToolchainSelector",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-shift-a": "toolchain::AddToolchain"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "FileFinder || (FileFinder > Picker > Editor)",
|
||||
"use_key_equivalents": true,
|
||||
@@ -1102,20 +1105,15 @@
|
||||
"ctrl-backspace": "tab_switcher::CloseSelectedItem"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "StashList || (StashList > Picker > Editor)",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-shift-backspace": "stash_picker::DropStashItem"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "Terminal",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-alt-space": "terminal::ShowCharacterPalette",
|
||||
"copy": "terminal::Copy",
|
||||
"ctrl-insert": "terminal::Copy",
|
||||
"ctrl-shift-c": "terminal::Copy",
|
||||
"paste": "terminal::Paste",
|
||||
"shift-insert": "terminal::Paste",
|
||||
"ctrl-shift-v": "terminal::Paste",
|
||||
"ctrl-enter": "assistant::InlineAssist",
|
||||
@@ -1123,7 +1121,6 @@
|
||||
"alt-f": ["terminal::SendText", "\u001bf"],
|
||||
"alt-.": ["terminal::SendText", "\u001b."],
|
||||
"ctrl-delete": ["terminal::SendText", "\u001bd"],
|
||||
"ctrl-n": "workspace::NewTerminal",
|
||||
// Overrides for conflicting keybindings
|
||||
"ctrl-b": ["terminal::SendKeystroke", "ctrl-b"],
|
||||
"ctrl-c": ["terminal::SendKeystroke", "ctrl-c"],
|
||||
@@ -1132,6 +1129,7 @@
|
||||
"ctrl-w": ["terminal::SendKeystroke", "ctrl-w"],
|
||||
"ctrl-backspace": ["terminal::SendKeystroke", "ctrl-w"],
|
||||
"ctrl-shift-a": "editor::SelectAll",
|
||||
"find": "buffer_search::Deploy",
|
||||
"ctrl-shift-f": "buffer_search::Deploy",
|
||||
"ctrl-shift-l": "terminal::Clear",
|
||||
"ctrl-shift-w": "pane::CloseActiveItem",
|
||||
@@ -1169,13 +1167,6 @@
|
||||
"ctrl-enter": "menu::Confirm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "ContextServerToolsModal",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"escape": "menu::Cancel"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "OnboardingAiConfigurationModal",
|
||||
"use_key_equivalents": true,
|
||||
@@ -1219,6 +1210,7 @@
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-f": "search::FocusSearch",
|
||||
"alt-find": "keymap_editor::ToggleKeystrokeSearch",
|
||||
"alt-f": "keymap_editor::ToggleKeystrokeSearch",
|
||||
"alt-c": "keymap_editor::ToggleConflictFilter",
|
||||
"enter": "keymap_editor::EditBinding",
|
||||
@@ -1257,48 +1249,12 @@
|
||||
"context": "Onboarding",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-enter": "onboarding::Finish",
|
||||
"alt-shift-l": "onboarding::SignIn",
|
||||
"ctrl-1": "onboarding::ActivateBasicsPage",
|
||||
"ctrl-2": "onboarding::ActivateEditingPage",
|
||||
"ctrl-3": "onboarding::ActivateAISetupPage",
|
||||
"ctrl-escape": "onboarding::Finish",
|
||||
"alt-tab": "onboarding::SignIn",
|
||||
"shift-alt-a": "onboarding::OpenAccount"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "SettingsWindow",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"ctrl-w": "workspace::CloseWindow",
|
||||
"escape": "workspace::CloseWindow",
|
||||
"ctrl-m": "settings_editor::Minimize",
|
||||
"ctrl-f": "search::FocusSearch",
|
||||
"left": "settings_editor::ToggleFocusNav",
|
||||
"ctrl-shift-e": "settings_editor::ToggleFocusNav",
|
||||
// todo(settings_ui): cut this down based on the max files and overflow UI
|
||||
"ctrl-1": ["settings_editor::FocusFile", 0],
|
||||
"ctrl-2": ["settings_editor::FocusFile", 1],
|
||||
"ctrl-3": ["settings_editor::FocusFile", 2],
|
||||
"ctrl-4": ["settings_editor::FocusFile", 3],
|
||||
"ctrl-5": ["settings_editor::FocusFile", 4],
|
||||
"ctrl-6": ["settings_editor::FocusFile", 5],
|
||||
"ctrl-7": ["settings_editor::FocusFile", 6],
|
||||
"ctrl-8": ["settings_editor::FocusFile", 7],
|
||||
"ctrl-9": ["settings_editor::FocusFile", 8],
|
||||
"ctrl-0": ["settings_editor::FocusFile", 9],
|
||||
"ctrl-pageup": "settings_editor::FocusPreviousFile",
|
||||
"ctrl-pagedown": "settings_editor::FocusNextFile"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "SettingsWindow > NavigationMenu",
|
||||
"use_key_equivalents": true,
|
||||
"bindings": {
|
||||
"up": "settings_editor::FocusPreviousNavEntry",
|
||||
"down": "settings_editor::FocusNextNavEntry",
|
||||
"right": "settings_editor::ExpandNavEntry",
|
||||
"left": "settings_editor::CollapseNavEntry",
|
||||
"pageup": "settings_editor::FocusPreviousRootNavEntry",
|
||||
"pagedown": "settings_editor::FocusNextRootNavEntry",
|
||||
"home": "settings_editor::FocusFirstNavEntry",
|
||||
"end": "settings_editor::FocusLastNavEntry"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
@@ -42,7 +42,7 @@
|
||||
"alt-,": "pane::GoBack", // xref-pop-marker-stack
|
||||
"ctrl-x h": "editor::SelectAll", // mark-whole-buffer
|
||||
"ctrl-d": "editor::Delete", // delete-char
|
||||
"alt-d": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }], // kill-word
|
||||
"alt-d": "editor::DeleteToNextWordEnd", // kill-word
|
||||
"ctrl-k": "editor::KillRingCut", // kill-line
|
||||
"ctrl-w": "editor::Cut", // kill-region
|
||||
"alt-w": "editor::Copy", // kill-ring-save
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[
|
||||
{
|
||||
"bindings": {
|
||||
"ctrl-alt-s": "zed::OpenSettingsFile",
|
||||
"ctrl-alt-s": "zed::OpenSettings",
|
||||
"ctrl-{": "pane::ActivatePreviousItem",
|
||||
"ctrl-}": "pane::ActivateNextItem",
|
||||
"shift-escape": null, // Unmap workspace::zoom
|
||||
@@ -125,7 +125,7 @@
|
||||
{
|
||||
"context": "Workspace || Editor",
|
||||
"bindings": {
|
||||
"alt-f12": "terminal_panel::Toggle",
|
||||
"alt-f12": "terminal_panel::ToggleFocus",
|
||||
"ctrl-shift-k": "git::Push"
|
||||
}
|
||||
},
|
||||
|
||||
@@ -50,8 +50,8 @@
|
||||
"ctrl-k ctrl-u": "editor::ConvertToUpperCase",
|
||||
"ctrl-k ctrl-l": "editor::ConvertToLowerCase",
|
||||
"shift-alt-m": "markdown::OpenPreviewToTheSide",
|
||||
"ctrl-backspace": ["editor::DeleteToPreviousWordStart", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-delete": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-backspace": "editor::DeleteToPreviousWordStart",
|
||||
"ctrl-delete": "editor::DeleteToNextWordEnd",
|
||||
"alt-right": "editor::MoveToNextSubwordEnd",
|
||||
"alt-left": "editor::MoveToPreviousSubwordStart",
|
||||
"alt-shift-right": "editor::SelectToNextSubwordEnd",
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
// from the command palette.
|
||||
[
|
||||
{
|
||||
"context": "!GitPanel",
|
||||
"bindings": {
|
||||
"ctrl-g": "menu::Cancel"
|
||||
}
|
||||
@@ -43,7 +42,7 @@
|
||||
"alt-,": "pane::GoBack", // xref-pop-marker-stack
|
||||
"ctrl-x h": "editor::SelectAll", // mark-whole-buffer
|
||||
"ctrl-d": "editor::Delete", // delete-char
|
||||
"alt-d": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }], // kill-word
|
||||
"alt-d": "editor::DeleteToNextWordEnd", // kill-word
|
||||
"ctrl-k": "editor::KillRingCut", // kill-line
|
||||
"ctrl-w": "editor::Cut", // kill-region
|
||||
"alt-w": "editor::Copy", // kill-ring-save
|
||||
|
||||
@@ -127,7 +127,7 @@
|
||||
{
|
||||
"context": "Workspace || Editor",
|
||||
"bindings": {
|
||||
"alt-f12": "terminal_panel::Toggle",
|
||||
"alt-f12": "terminal_panel::ToggleFocus",
|
||||
"cmd-shift-k": "git::Push"
|
||||
}
|
||||
},
|
||||
|
||||
@@ -52,8 +52,8 @@
|
||||
"cmd-k cmd-l": "editor::ConvertToLowerCase",
|
||||
"cmd-shift-j": "editor::JoinLines",
|
||||
"shift-alt-m": "markdown::OpenPreviewToTheSide",
|
||||
"ctrl-backspace": ["editor::DeleteToPreviousWordStart", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-delete": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-backspace": "editor::DeleteToPreviousWordStart",
|
||||
"ctrl-delete": "editor::DeleteToNextWordEnd",
|
||||
"ctrl-right": "editor::MoveToNextSubwordEnd",
|
||||
"ctrl-left": "editor::MoveToPreviousSubwordStart",
|
||||
"ctrl-shift-right": "editor::SelectToNextSubwordEnd",
|
||||
|
||||
@@ -21,10 +21,10 @@
|
||||
{
|
||||
"context": "Editor",
|
||||
"bindings": {
|
||||
"alt-backspace": ["editor::DeleteToPreviousWordStart", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"alt-shift-backspace": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"alt-delete": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"alt-shift-delete": ["editor::DeleteToNextWordEnd", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"alt-backspace": "editor::DeleteToPreviousWordStart",
|
||||
"alt-shift-backspace": "editor::DeleteToNextWordEnd",
|
||||
"alt-delete": "editor::DeleteToNextWordEnd",
|
||||
"alt-shift-delete": "editor::DeleteToNextWordEnd",
|
||||
"ctrl-backspace": "editor::DeleteToPreviousSubwordStart",
|
||||
"ctrl-delete": "editor::DeleteToNextSubwordEnd",
|
||||
"alt-left": ["editor::MoveToPreviousWordStart", { "stop_at_soft_wraps": true }],
|
||||
|
||||
@@ -32,6 +32,34 @@
|
||||
"(": "vim::SentenceBackward",
|
||||
")": "vim::SentenceForward",
|
||||
"|": "vim::GoToColumn",
|
||||
"] ]": "vim::NextSectionStart",
|
||||
"] [": "vim::NextSectionEnd",
|
||||
"[ [": "vim::PreviousSectionStart",
|
||||
"[ ]": "vim::PreviousSectionEnd",
|
||||
"] m": "vim::NextMethodStart",
|
||||
"] shift-m": "vim::NextMethodEnd",
|
||||
"[ m": "vim::PreviousMethodStart",
|
||||
"[ shift-m": "vim::PreviousMethodEnd",
|
||||
"[ *": "vim::PreviousComment",
|
||||
"[ /": "vim::PreviousComment",
|
||||
"] *": "vim::NextComment",
|
||||
"] /": "vim::NextComment",
|
||||
"[ -": "vim::PreviousLesserIndent",
|
||||
"[ +": "vim::PreviousGreaterIndent",
|
||||
"[ =": "vim::PreviousSameIndent",
|
||||
"] -": "vim::NextLesserIndent",
|
||||
"] +": "vim::NextGreaterIndent",
|
||||
"] =": "vim::NextSameIndent",
|
||||
"] b": "pane::ActivateNextItem",
|
||||
"[ b": "pane::ActivatePreviousItem",
|
||||
"] shift-b": "pane::ActivateLastItem",
|
||||
"[ shift-b": ["pane::ActivateItem", 0],
|
||||
"] space": "vim::InsertEmptyLineBelow",
|
||||
"[ space": "vim::InsertEmptyLineAbove",
|
||||
"[ e": "editor::MoveLineUp",
|
||||
"] e": "editor::MoveLineDown",
|
||||
"[ f": "workspace::FollowNextCollaborator",
|
||||
"] f": "workspace::FollowNextCollaborator",
|
||||
|
||||
// Word motions
|
||||
"w": "vim::NextWordStart",
|
||||
@@ -55,6 +83,10 @@
|
||||
"n": "vim::MoveToNextMatch",
|
||||
"shift-n": "vim::MoveToPreviousMatch",
|
||||
"%": "vim::Matching",
|
||||
"] }": ["vim::UnmatchedForward", { "char": "}" }],
|
||||
"[ {": ["vim::UnmatchedBackward", { "char": "{" }],
|
||||
"] )": ["vim::UnmatchedForward", { "char": ")" }],
|
||||
"[ (": ["vim::UnmatchedBackward", { "char": "(" }],
|
||||
"f": ["vim::PushFindForward", { "before": false, "multiline": false }],
|
||||
"t": ["vim::PushFindForward", { "before": true, "multiline": false }],
|
||||
"shift-f": ["vim::PushFindBackward", { "after": false, "multiline": false }],
|
||||
@@ -95,8 +127,8 @@
|
||||
"g g": "vim::StartOfDocument",
|
||||
"g h": "editor::Hover",
|
||||
"g B": "editor::BlameHover",
|
||||
"g t": "vim::GoToTab",
|
||||
"g shift-t": "vim::GoToPreviousTab",
|
||||
"g t": "pane::ActivateNextItem",
|
||||
"g shift-t": "pane::ActivatePreviousItem",
|
||||
"g d": "editor::GoToDefinition",
|
||||
"g shift-d": "editor::GoToDeclaration",
|
||||
"g y": "editor::GoToTypeDefinition",
|
||||
@@ -187,46 +219,6 @@
|
||||
".": "vim::Repeat"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "vim_mode == normal || vim_mode == visual || vim_mode == operator",
|
||||
"bindings": {
|
||||
"] ]": "vim::NextSectionStart",
|
||||
"] [": "vim::NextSectionEnd",
|
||||
"[ [": "vim::PreviousSectionStart",
|
||||
"[ ]": "vim::PreviousSectionEnd",
|
||||
"] m": "vim::NextMethodStart",
|
||||
"] shift-m": "vim::NextMethodEnd",
|
||||
"[ m": "vim::PreviousMethodStart",
|
||||
"[ shift-m": "vim::PreviousMethodEnd",
|
||||
"[ *": "vim::PreviousComment",
|
||||
"[ /": "vim::PreviousComment",
|
||||
"] *": "vim::NextComment",
|
||||
"] /": "vim::NextComment",
|
||||
"[ -": "vim::PreviousLesserIndent",
|
||||
"[ +": "vim::PreviousGreaterIndent",
|
||||
"[ =": "vim::PreviousSameIndent",
|
||||
"] -": "vim::NextLesserIndent",
|
||||
"] +": "vim::NextGreaterIndent",
|
||||
"] =": "vim::NextSameIndent",
|
||||
"] b": "pane::ActivateNextItem",
|
||||
"[ b": "pane::ActivatePreviousItem",
|
||||
"] shift-b": "pane::ActivateLastItem",
|
||||
"[ shift-b": ["pane::ActivateItem", 0],
|
||||
"] space": "vim::InsertEmptyLineBelow",
|
||||
"[ space": "vim::InsertEmptyLineAbove",
|
||||
"[ e": "editor::MoveLineUp",
|
||||
"] e": "editor::MoveLineDown",
|
||||
"[ f": "workspace::FollowNextCollaborator",
|
||||
"] f": "workspace::FollowNextCollaborator",
|
||||
"] }": ["vim::UnmatchedForward", { "char": "}" }],
|
||||
"[ {": ["vim::UnmatchedBackward", { "char": "{" }],
|
||||
"] )": ["vim::UnmatchedForward", { "char": ")" }],
|
||||
"[ (": ["vim::UnmatchedBackward", { "char": "(" }],
|
||||
// tree-sitter related commands
|
||||
"[ x": "vim::SelectLargerSyntaxNode",
|
||||
"] x": "vim::SelectSmallerSyntaxNode"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "vim_mode == normal",
|
||||
"bindings": {
|
||||
@@ -240,7 +232,6 @@
|
||||
"delete": "vim::DeleteRight",
|
||||
"g shift-j": "vim::JoinLinesNoWhitespace",
|
||||
"y": "vim::PushYank",
|
||||
"shift-y": "vim::YankLine",
|
||||
"x": "vim::DeleteRight",
|
||||
"shift-x": "vim::DeleteLeft",
|
||||
"ctrl-a": "vim::Increment",
|
||||
@@ -258,6 +249,9 @@
|
||||
"g w": "vim::PushRewrap",
|
||||
"g q": "vim::PushRewrap",
|
||||
"insert": "vim::InsertBefore",
|
||||
// tree-sitter related commands
|
||||
"[ x": "vim::SelectLargerSyntaxNode",
|
||||
"] x": "vim::SelectSmallerSyntaxNode",
|
||||
"] d": "editor::GoToDiagnostic",
|
||||
"[ d": "editor::GoToPreviousDiagnostic",
|
||||
"] c": "editor::GoToHunk",
|
||||
@@ -323,28 +317,10 @@
|
||||
"g w": "vim::Rewrap",
|
||||
"g ?": "vim::ConvertToRot13",
|
||||
// "g ?": "vim::ConvertToRot47",
|
||||
"\"": "vim::PushRegister"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "vim_mode == helix_select",
|
||||
"bindings": {
|
||||
"v": "vim::NormalBefore",
|
||||
";": "vim::HelixCollapseSelection",
|
||||
"~": "vim::ChangeCase",
|
||||
"ctrl-a": "vim::Increment",
|
||||
"ctrl-x": "vim::Decrement",
|
||||
"shift-j": "vim::JoinLines",
|
||||
"i": "vim::InsertBefore",
|
||||
"a": "vim::InsertAfter",
|
||||
"p": "vim::Paste",
|
||||
"u": "vim::Undo",
|
||||
"r": "vim::PushReplace",
|
||||
"s": "vim::Substitute",
|
||||
"ctrl-pageup": "pane::ActivatePreviousItem",
|
||||
"ctrl-pagedown": "pane::ActivateNextItem",
|
||||
".": "vim::Repeat",
|
||||
"alt-.": "vim::RepeatFind"
|
||||
"\"": "vim::PushRegister",
|
||||
// tree-sitter related commands
|
||||
"[ x": "editor::SelectLargerSyntaxNode",
|
||||
"] x": "editor::SelectSmallerSyntaxNode"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -361,7 +337,7 @@
|
||||
"ctrl-x ctrl-z": "editor::Cancel",
|
||||
"ctrl-x ctrl-e": "vim::LineDown",
|
||||
"ctrl-x ctrl-y": "vim::LineUp",
|
||||
"ctrl-w": ["editor::DeleteToPreviousWordStart", { "ignore_newlines": false, "ignore_brackets": false }],
|
||||
"ctrl-w": "editor::DeleteToPreviousWordStart",
|
||||
"ctrl-u": "editor::DeleteToBeginningOfLine",
|
||||
"ctrl-t": "vim::Indent",
|
||||
"ctrl-d": "vim::Outdent",
|
||||
@@ -378,15 +354,6 @@
|
||||
"ctrl-s": "editor::ShowSignatureHelp"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "showing_completions",
|
||||
"bindings": {
|
||||
"ctrl-d": "vim::ScrollDown",
|
||||
"ctrl-u": "vim::ScrollUp",
|
||||
"ctrl-e": "vim::LineDown",
|
||||
"ctrl-y": "vim::LineUp"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "(vim_mode == normal || vim_mode == helix_normal) && !menu",
|
||||
"bindings": {
|
||||
@@ -418,25 +385,14 @@
|
||||
"bindings": {
|
||||
"i": "vim::HelixInsert",
|
||||
"a": "vim::HelixAppend",
|
||||
"ctrl-[": "editor::Cancel"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "(vim_mode == helix_normal || vim_mode == helix_select) && !menu",
|
||||
"bindings": {
|
||||
"ctrl-[": "editor::Cancel",
|
||||
";": "vim::HelixCollapseSelection",
|
||||
":": "command_palette::Toggle",
|
||||
"m": "vim::PushHelixMatch",
|
||||
"s": "vim::HelixSelectRegex",
|
||||
"]": ["vim::PushHelixNext", { "around": true }],
|
||||
"[": ["vim::PushHelixPrevious", { "around": true }],
|
||||
"left": "vim::WrappingLeft",
|
||||
"right": "vim::WrappingRight",
|
||||
"h": "vim::WrappingLeft",
|
||||
"l": "vim::WrappingRight",
|
||||
"y": "vim::HelixYank",
|
||||
"p": "vim::HelixPaste",
|
||||
"shift-p": ["vim::HelixPaste", { "before": true }],
|
||||
"alt-;": "vim::OtherEnd",
|
||||
"ctrl-r": "vim::Redo",
|
||||
"f": ["vim::PushFindForward", { "before": false, "multiline": true }],
|
||||
@@ -446,13 +402,21 @@
|
||||
">": "vim::Indent",
|
||||
"<": "vim::Outdent",
|
||||
"=": "vim::AutoIndent",
|
||||
"`": "vim::ConvertToLowerCase",
|
||||
"alt-`": "vim::ConvertToUpperCase",
|
||||
"g u": "vim::PushLowercase",
|
||||
"g shift-u": "vim::PushUppercase",
|
||||
"g ~": "vim::PushOppositeCase",
|
||||
"g q": "vim::PushRewrap",
|
||||
"g w": "vim::PushRewrap",
|
||||
"insert": "vim::InsertBefore",
|
||||
"alt-.": "vim::RepeatFind",
|
||||
"alt-s": ["editor::SplitSelectionIntoLines", { "keep_selections": true }],
|
||||
// tree-sitter related commands
|
||||
"[ x": "editor::SelectLargerSyntaxNode",
|
||||
"] x": "editor::SelectSmallerSyntaxNode",
|
||||
"] d": "editor::GoToDiagnostic",
|
||||
"[ d": "editor::GoToPreviousDiagnostic",
|
||||
"] c": "editor::GoToHunk",
|
||||
"[ c": "editor::GoToPreviousHunk",
|
||||
// Goto mode
|
||||
"g n": "pane::ActivateNextItem",
|
||||
"g p": "pane::ActivatePreviousItem",
|
||||
@@ -496,6 +460,9 @@
|
||||
"space c": "editor::ToggleComments",
|
||||
"space y": "editor::Copy",
|
||||
"space p": "editor::Paste",
|
||||
// Match mode
|
||||
"m m": "vim::Matching",
|
||||
"m i w": ["workspace::SendKeystrokes", "v i w"],
|
||||
"shift-u": "editor::Redo",
|
||||
"ctrl-c": "editor::ToggleComments",
|
||||
"d": "vim::HelixDelete",
|
||||
@@ -564,7 +531,7 @@
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "vim_operator == a || vim_operator == i || vim_operator == cs || vim_operator == helix_next || vim_operator == helix_previous",
|
||||
"context": "vim_operator == a || vim_operator == i || vim_operator == cs",
|
||||
"bindings": {
|
||||
"w": "vim::Word",
|
||||
"shift-w": ["vim::Word", { "ignore_punctuation": true }],
|
||||
@@ -580,18 +547,18 @@
|
||||
// "q": "vim::AnyQuotes",
|
||||
"q": "vim::MiniQuotes",
|
||||
"|": "vim::VerticalBars",
|
||||
"(": ["vim::Parentheses", { "opening": true }],
|
||||
"(": "vim::Parentheses",
|
||||
")": "vim::Parentheses",
|
||||
"b": "vim::Parentheses",
|
||||
// "b": "vim::AnyBrackets",
|
||||
// "b": "vim::MiniBrackets",
|
||||
"[": ["vim::SquareBrackets", { "opening": true }],
|
||||
"[": "vim::SquareBrackets",
|
||||
"]": "vim::SquareBrackets",
|
||||
"r": "vim::SquareBrackets",
|
||||
"{": ["vim::CurlyBrackets", { "opening": true }],
|
||||
"{": "vim::CurlyBrackets",
|
||||
"}": "vim::CurlyBrackets",
|
||||
"shift-b": "vim::CurlyBrackets",
|
||||
"<": ["vim::AngleBrackets", { "opening": true }],
|
||||
"<": "vim::AngleBrackets",
|
||||
">": "vim::AngleBrackets",
|
||||
"a": "vim::Argument",
|
||||
"i": "vim::IndentObj",
|
||||
@@ -601,48 +568,6 @@
|
||||
"e": "vim::EntireFile"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "vim_operator == helix_m",
|
||||
"bindings": {
|
||||
"m": "vim::Matching"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "vim_operator == helix_next",
|
||||
"bindings": {
|
||||
"z": "vim::NextSectionStart",
|
||||
"shift-z": "vim::NextSectionEnd",
|
||||
"*": "vim::NextComment",
|
||||
"/": "vim::NextComment",
|
||||
"-": "vim::NextLesserIndent",
|
||||
"+": "vim::NextGreaterIndent",
|
||||
"=": "vim::NextSameIndent",
|
||||
"b": "pane::ActivateNextItem",
|
||||
"shift-b": "pane::ActivateLastItem",
|
||||
"x": "editor::SelectSmallerSyntaxNode",
|
||||
"d": "editor::GoToDiagnostic",
|
||||
"c": "editor::GoToHunk",
|
||||
"space": "vim::InsertEmptyLineBelow"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "vim_operator == helix_previous",
|
||||
"bindings": {
|
||||
"z": "vim::PreviousSectionStart",
|
||||
"shift-z": "vim::PreviousSectionEnd",
|
||||
"*": "vim::PreviousComment",
|
||||
"/": "vim::PreviousComment",
|
||||
"-": "vim::PreviousLesserIndent",
|
||||
"+": "vim::PreviousGreaterIndent",
|
||||
"=": "vim::PreviousSameIndent",
|
||||
"b": "pane::ActivatePreviousItem",
|
||||
"shift-b": ["pane::ActivateItem", 0],
|
||||
"x": "editor::SelectLargerSyntaxNode",
|
||||
"d": "editor::GoToPreviousDiagnostic",
|
||||
"c": "editor::GoToPreviousHunk",
|
||||
"space": "vim::InsertEmptyLineAbove"
|
||||
}
|
||||
},
|
||||
{
|
||||
"context": "vim_operator == c",
|
||||
"bindings": {
|
||||
@@ -884,18 +809,16 @@
|
||||
"/": "project_panel::NewSearchInDirectory",
|
||||
"d": "project_panel::NewDirectory",
|
||||
"enter": "project_panel::OpenPermanent",
|
||||
"escape": "vim::ToggleProjectPanelFocus",
|
||||
"escape": "project_panel::ToggleFocus",
|
||||
"h": "project_panel::CollapseSelectedEntry",
|
||||
"j": "vim::MenuSelectNext",
|
||||
"k": "vim::MenuSelectPrevious",
|
||||
"down": "vim::MenuSelectNext",
|
||||
"up": "vim::MenuSelectPrevious",
|
||||
"j": "menu::SelectNext",
|
||||
"k": "menu::SelectPrevious",
|
||||
"l": "project_panel::ExpandSelectedEntry",
|
||||
"o": "project_panel::OpenPermanent",
|
||||
"shift-d": "project_panel::Delete",
|
||||
"shift-r": "project_panel::Rename",
|
||||
"t": "project_panel::OpenPermanent",
|
||||
"v": "project_panel::OpenSplitVertical",
|
||||
"o": "project_panel::OpenSplitHorizontal",
|
||||
"v": "project_panel::OpenPermanent",
|
||||
"p": "project_panel::Open",
|
||||
"x": "project_panel::RevealInFileManager",
|
||||
"s": "workspace::OpenWithSystem",
|
||||
@@ -908,22 +831,7 @@
|
||||
"{": "project_panel::SelectPrevDirectory",
|
||||
"shift-g": "menu::SelectLast",
|
||||
"g g": "menu::SelectFirst",
|
||||
"-": "project_panel::SelectParent",
|
||||
"ctrl-u": "project_panel::ScrollUp",
|
||||
"ctrl-d": "project_panel::ScrollDown",
|
||||
"z t": "project_panel::ScrollCursorTop",
|
||||
"z z": "project_panel::ScrollCursorCenter",
|
||||
"z b": "project_panel::ScrollCursorBottom",
|
||||
"0": ["vim::Number", 0],
|
||||
"1": ["vim::Number", 1],
|
||||
"2": ["vim::Number", 2],
|
||||
"3": ["vim::Number", 3],
|
||||
"4": ["vim::Number", 4],
|
||||
"5": ["vim::Number", 5],
|
||||
"6": ["vim::Number", 6],
|
||||
"7": ["vim::Number", 7],
|
||||
"8": ["vim::Number", 8],
|
||||
"9": ["vim::Number", 9]
|
||||
"-": "project_panel::SelectParent"
|
||||
}
|
||||
},
|
||||
{
|
||||
|
||||
@@ -172,7 +172,7 @@ The user has specified the following rules that should be applied:
|
||||
Rules title: {{title}}
|
||||
{{/if}}
|
||||
``````
|
||||
{{contents}}
|
||||
{{contents}}}
|
||||
``````
|
||||
{{/each}}
|
||||
{{/if}}
|
||||
|
||||
@@ -29,9 +29,7 @@ Generate {{content_type}} based on the following prompt:
|
||||
|
||||
Match the indentation in the original file in the inserted {{content_type}}; don't include any indentation on blank lines.
|
||||
|
||||
Return ONLY the {{content_type}} to insert. Do NOT include any XML tags like <document>, <insert_here>, or any surrounding markup from the input.
|
||||
|
||||
Respond with a code block containing the {{content_type}} to insert. Replace \{{INSERTED_CODE}} with your actual {{content_type}}:
|
||||
Immediately start with the following format with no remarks:
|
||||
|
||||
```
|
||||
\{{INSERTED_CODE}}
|
||||
@@ -68,9 +66,7 @@ Only make changes that are necessary to fulfill the prompt, leave everything els
|
||||
|
||||
Start at the indentation level in the original file in the rewritten {{content_type}}. Don't stop until you've rewritten the entire section; even if you have no more changes to make, always write out the whole section with no unnecessary elisions.
|
||||
|
||||
Return ONLY the rewritten {{content_type}}. Do NOT include any XML tags like <document>, <rewrite_this>, or any surrounding markup from the input.
|
||||
|
||||
Respond with a code block containing the rewritten {{content_type}}. Replace \{{REWRITTEN_CODE}} with your actual rewritten {{content_type}}:
|
||||
Immediately start with the following format with no remarks:
|
||||
|
||||
```
|
||||
\{{REWRITTEN_CODE}}
|
||||
|
||||
@@ -1,8 +1,4 @@
|
||||
{
|
||||
"$schema": "zed://schemas/settings",
|
||||
/// The displayed name of this project. If not set or empty, the root directory name
|
||||
/// will be displayed.
|
||||
"project_name": "",
|
||||
// The name of the Zed theme to use for the UI.
|
||||
//
|
||||
// `mode` is one of:
|
||||
@@ -75,10 +71,8 @@
|
||||
"ui_font_weight": 400,
|
||||
// The default font size for text in the UI
|
||||
"ui_font_size": 16,
|
||||
// The default font size for agent responses in the agent panel. Falls back to the UI font size if unset.
|
||||
"agent_ui_font_size": null,
|
||||
// The default font size for user messages in the agent panel.
|
||||
"agent_buffer_font_size": 12,
|
||||
// The default font size for text in the agent panel. Falls back to the UI font size if unset.
|
||||
"agent_font_size": null,
|
||||
// How much to fade out unused code.
|
||||
"unnecessary_code_fade": 0.3,
|
||||
// Active pane styling settings.
|
||||
@@ -120,7 +114,6 @@
|
||||
// Whether to enable vim modes and key bindings.
|
||||
"vim_mode": false,
|
||||
// Whether to enable helix mode and key bindings.
|
||||
// Enabling this mode will automatically enable vim mode.
|
||||
"helix_mode": false,
|
||||
// Whether to show the informational hover box when moving the mouse
|
||||
// over symbols in the editor.
|
||||
@@ -195,8 +188,8 @@
|
||||
// 4. A box drawn around the following character
|
||||
// "hollow"
|
||||
//
|
||||
// Default: "bar"
|
||||
"cursor_shape": "bar",
|
||||
// Default: not set, defaults to "bar"
|
||||
"cursor_shape": null,
|
||||
// Determines when the mouse cursor should be hidden in an editor or input box.
|
||||
//
|
||||
// 1. Never hide the mouse cursor:
|
||||
@@ -230,25 +223,9 @@
|
||||
"current_line_highlight": "all",
|
||||
// Whether to highlight all occurrences of the selected text in an editor.
|
||||
"selection_highlight": true,
|
||||
// Whether the text selection should have rounded corners.
|
||||
"rounded_selection": true,
|
||||
// The debounce delay before querying highlights from the language
|
||||
// server based on the current cursor location.
|
||||
"lsp_highlight_debounce": 75,
|
||||
// The minimum APCA perceptual contrast between foreground and background colors.
|
||||
// APCA (Accessible Perceptual Contrast Algorithm) is more accurate than WCAG 2.x,
|
||||
// especially for dark mode. Values range from 0 to 106.
|
||||
//
|
||||
// Based on APCA Readability Criterion (ARC) Bronze Simple Mode:
|
||||
// https://readtech.org/ARC/tests/bronze-simple-mode/
|
||||
// - 0: No contrast adjustment
|
||||
// - 45: Minimum for large fluent text (36px+)
|
||||
// - 60: Minimum for other content text
|
||||
// - 75: Minimum for body text
|
||||
// - 90: Preferred for body text
|
||||
//
|
||||
// This only affects text drawn over highlight backgrounds in the editor.
|
||||
"minimum_contrast_for_highlights": 45,
|
||||
// Whether to pop the completions menu while typing in an editor without
|
||||
// explicitly requesting it.
|
||||
"show_completions_on_input": true,
|
||||
@@ -289,8 +266,8 @@
|
||||
// - "warning"
|
||||
// - "info"
|
||||
// - "hint"
|
||||
// - "all" — allow all diagnostics (default)
|
||||
"diagnostics_max_severity": "all",
|
||||
// - null — allow all diagnostics (default)
|
||||
"diagnostics_max_severity": null,
|
||||
// Whether to show wrap guides (vertical rulers) in the editor.
|
||||
// Setting this to true will show a guide at the 'preferred_line_length' value
|
||||
// if 'soft_wrap' is set to 'preferred_line_length', and will show any
|
||||
@@ -302,8 +279,6 @@
|
||||
"redact_private_values": false,
|
||||
// The default number of lines to expand excerpts in the multibuffer by.
|
||||
"expand_excerpt_lines": 5,
|
||||
// The default number of context lines shown in multibuffer excerpts.
|
||||
"excerpt_context_lines": 2,
|
||||
// Globs to match against file paths to determine if a file is private.
|
||||
"private_files": ["**/.env*", "**/*.pem", "**/*.key", "**/*.cert", "**/*.crt", "**/secrets.yml"],
|
||||
// Whether to use additional LSP queries to format (and amend) the code after
|
||||
@@ -317,7 +292,7 @@
|
||||
// bracket, brace, single or double quote characters.
|
||||
// For example, when you select text and type (, Zed will surround the text with ().
|
||||
"use_auto_surround": true,
|
||||
// Whether indentation should be adjusted based on the context whilst typing.
|
||||
/// Whether indentation should be adjusted based on the context whilst typing.
|
||||
"auto_indent": true,
|
||||
// Whether indentation of pasted content should be adjusted based on the context.
|
||||
"auto_indent_on_paste": true,
|
||||
@@ -368,11 +343,6 @@
|
||||
// - It is adjacent to an edge (start or end)
|
||||
// - It is adjacent to a whitespace (left or right)
|
||||
"show_whitespaces": "selection",
|
||||
// Visible characters used to render whitespace when show_whitespaces is enabled.
|
||||
"whitespace_map": {
|
||||
"space": "•",
|
||||
"tab": "→"
|
||||
},
|
||||
// Settings related to calls in Zed
|
||||
"calls": {
|
||||
// Join calls with the microphone live by default
|
||||
@@ -412,39 +382,6 @@
|
||||
// Whether to show the menus in the titlebar.
|
||||
"show_menus": false
|
||||
},
|
||||
"audio": {
|
||||
// Opt into the new audio system.
|
||||
"experimental.rodio_audio": false,
|
||||
// Requires 'rodio_audio: true'
|
||||
//
|
||||
// Automatically increase or decrease your microphone's volume. This affects how
|
||||
// loud you sound to others.
|
||||
//
|
||||
// Recommended: off (default)
|
||||
// Microphones are too quiet in Zed; until everyone is on experimental
|
||||
// audio and has auto speaker volume on, this will make you very loud
|
||||
// compared to other speakers.
|
||||
"experimental.auto_microphone_volume": false,
|
||||
// Requires 'rodio_audio: true'
|
||||
//
|
||||
// Automatically increase or decrease the volume of other call members.
|
||||
// This only affects how things sound for you.
|
||||
"experimental.auto_speaker_volume": true,
|
||||
// Requires 'rodio_audio: true'
|
||||
//
|
||||
// Remove background noises. Works great for typing, cars, dogs, AC. Does
|
||||
// not work well on music.
|
||||
"experimental.denoise": true,
|
||||
// Requires 'rodio_audio: true'
|
||||
//
|
||||
// Use audio parameters compatible with the previous versions of
|
||||
// experimental audio and non-experimental audio. When this is false you
|
||||
// will sound strange to anyone not on the latest experimental audio. In
|
||||
// the future we will migrate by setting this to false.
|
||||
//
|
||||
// You need to rejoin a call for this setting to apply
|
||||
"experimental.legacy_audio_compatible": true
|
||||
},
|
||||
// Scrollbar related settings
|
||||
"scrollbar": {
|
||||
// When to show the scrollbar in the editor.
|
||||
@@ -625,7 +562,6 @@
|
||||
// Toggle certain types of hints on and off, all switched on by default.
|
||||
"show_type_hints": true,
|
||||
"show_parameter_hints": true,
|
||||
"show_value_hints": true,
|
||||
// Corresponds to null/None LSP hint type value.
|
||||
"show_other_hints": true,
|
||||
// Whether to show a background for inlay hints.
|
||||
@@ -722,9 +658,7 @@
|
||||
// Whether to enable drag-and-drop operations in the project panel.
|
||||
"drag_and_drop": true,
|
||||
// Whether to hide the root entry when only one folder is open in the window.
|
||||
"hide_root": false,
|
||||
// Whether to hide the hidden entries in the project panel.
|
||||
"hide_hidden": false
|
||||
"hide_root": false
|
||||
},
|
||||
"outline_panel": {
|
||||
// Whether to show the outline panel button in the status bar
|
||||
@@ -788,6 +722,16 @@
|
||||
// Default width of the collaboration panel.
|
||||
"default_width": 240
|
||||
},
|
||||
"chat_panel": {
|
||||
// When to show the chat panel button in the status bar.
|
||||
// Can be 'never', 'always', or 'when_in_call',
|
||||
// or a boolean (interpreted as 'never'/'always').
|
||||
"button": "when_in_call",
|
||||
// Where to dock the chat panel. Can be 'left' or 'right'.
|
||||
"dock": "right",
|
||||
// Default width of the chat panel.
|
||||
"default_width": 240
|
||||
},
|
||||
"git_panel": {
|
||||
// Whether to show the git panel button in the status bar.
|
||||
"button": true,
|
||||
@@ -836,7 +780,7 @@
|
||||
"agent": {
|
||||
// Whether the agent is enabled.
|
||||
"enabled": true,
|
||||
// What completion mode to start new threads in, if available. Can be 'normal' or 'burn'.
|
||||
/// What completion mode to start new threads in, if available. Can be 'normal' or 'burn'.
|
||||
"preferred_completion_mode": "normal",
|
||||
// Whether to show the agent panel button in the status bar.
|
||||
"button": true,
|
||||
@@ -846,8 +790,6 @@
|
||||
"default_width": 640,
|
||||
// Default height when the agent panel is docked to the bottom.
|
||||
"default_height": 320,
|
||||
// The view to use by default (thread, or text_thread)
|
||||
"default_view": "thread",
|
||||
// The default model to use when creating new threads.
|
||||
"default_model": {
|
||||
// The provider to use.
|
||||
@@ -878,9 +820,6 @@
|
||||
// }
|
||||
],
|
||||
// When enabled, the agent can run potentially destructive actions without asking for your confirmation.
|
||||
//
|
||||
// Note: This setting has no effect on external agents that support permission modes, such as Claude Code.
|
||||
// You can set `agent_servers.claude.default_mode` to `bypassPermissions` to skip all permission requests.
|
||||
"always_allow_tool_actions": false,
|
||||
// When enabled, the agent will stream edits.
|
||||
"stream_edits": false,
|
||||
@@ -906,7 +845,6 @@
|
||||
"now": true,
|
||||
"find_path": true,
|
||||
"read_file": true,
|
||||
"open": true,
|
||||
"grep": true,
|
||||
"terminal": true,
|
||||
"thinking": true,
|
||||
@@ -918,6 +856,7 @@
|
||||
// We don't know which of the context server tools are safe for the "Ask" profile, so we don't enable them by default.
|
||||
// "enable_all_context_servers": true,
|
||||
"tools": {
|
||||
"contents": true,
|
||||
"diagnostics": true,
|
||||
"fetch": true,
|
||||
"list_directory": true,
|
||||
@@ -949,22 +888,22 @@
|
||||
|
||||
// Default: false
|
||||
"play_sound_when_agent_done": false,
|
||||
// Whether to have edit cards in the agent panel expanded, showing a preview of the full diff.
|
||||
//
|
||||
// Default: true
|
||||
/// Whether to have edit cards in the agent panel expanded, showing a preview of the full diff.
|
||||
///
|
||||
/// Default: true
|
||||
"expand_edit_card": true,
|
||||
// Whether to have terminal cards in the agent panel expanded, showing the whole command output.
|
||||
//
|
||||
// Default: true
|
||||
"expand_terminal_card": true,
|
||||
// Whether to always use cmd-enter (or ctrl-enter on Linux or Windows) to send messages in the agent panel.
|
||||
//
|
||||
// Default: false
|
||||
"use_modifier_to_send": false,
|
||||
// Minimum number of lines to display in the agent message editor.
|
||||
//
|
||||
// Default: 4
|
||||
"message_editor_min_lines": 4
|
||||
/// Whether to have terminal cards in the agent panel expanded, showing the whole command output.
|
||||
///
|
||||
/// Default: true
|
||||
"expand_terminal_card": true
|
||||
},
|
||||
// The settings for slash commands.
|
||||
"slash_commands": {
|
||||
// Settings for the `/project` slash command.
|
||||
"project": {
|
||||
// Whether `/project` is enabled.
|
||||
"enabled": false
|
||||
}
|
||||
},
|
||||
// Whether the screen sharing icon is shown in the os status bar.
|
||||
"show_call_status_icon": true,
|
||||
@@ -977,7 +916,6 @@
|
||||
//
|
||||
// This is typically customized on a per-language basis.
|
||||
"language_servers": ["..."],
|
||||
|
||||
// When to automatically save edited buffers. This setting can
|
||||
// take four values.
|
||||
//
|
||||
@@ -1006,7 +944,7 @@
|
||||
// Show git status colors in the editor tabs.
|
||||
"git_status": false,
|
||||
// Position of the close button on the editor tabs.
|
||||
// One of: ["right", "left"]
|
||||
// One of: ["right", "left", "hidden"]
|
||||
"close_position": "right",
|
||||
// Whether to show the file icon for a tab.
|
||||
"file_icons": false,
|
||||
@@ -1104,31 +1042,25 @@
|
||||
// Removes any lines containing only whitespace at the end of the file and
|
||||
// ensures just one newline at the end.
|
||||
"ensure_final_newline_on_save": true,
|
||||
// Whether or not to perform a buffer format before saving: [on, off]
|
||||
// Whether or not to perform a buffer format before saving: [on, off, prettier, language_server]
|
||||
// Keep in mind, if the autosave with delay is enabled, format_on_save will be ignored
|
||||
"format_on_save": "on",
|
||||
// How to perform a buffer format. This setting can take multiple values:
|
||||
// How to perform a buffer format. This setting can take 4 values:
|
||||
//
|
||||
// 1. Default. Format files using Zed's Prettier integration (if applicable),
|
||||
// or falling back to formatting via language server:
|
||||
// "formatter": "auto"
|
||||
// 2. Format code using the current language server:
|
||||
// 1. Format code using the current language server:
|
||||
// "formatter": "language_server"
|
||||
// 3. Format code using a specific language server:
|
||||
// "formatter": {"language_server": {"name": "ruff"}}
|
||||
// 4. Format code using an external command:
|
||||
// 2. Format code using an external command:
|
||||
// "formatter": {
|
||||
// "external": {
|
||||
// "command": "prettier",
|
||||
// "arguments": ["--stdin-filepath", "{buffer_path}"]
|
||||
// }
|
||||
// }
|
||||
// 5. Format code using Zed's Prettier integration:
|
||||
// 3. Format code using Zed's Prettier integration:
|
||||
// "formatter": "prettier"
|
||||
// 6. Format code using a code action
|
||||
// "formatter": {"code_action": "source.fixAll.eslint"}
|
||||
// 7. An array of any format step specified above to apply in order
|
||||
// "formatter": [{"code_action": "source.fixAll.eslint"}, "prettier"]
|
||||
// 4. Default. Format files using Zed's Prettier integration (if applicable),
|
||||
// or falling back to formatting via language server:
|
||||
// "formatter": "auto"
|
||||
"formatter": "auto",
|
||||
// How to soft-wrap long lines of text.
|
||||
// Possible values:
|
||||
@@ -1240,10 +1172,6 @@
|
||||
// 2. Hide the gutter
|
||||
// "git_gutter": "hide"
|
||||
"git_gutter": "tracked_files",
|
||||
/// Sets the debounce threshold (in milliseconds) after which changes are reflected in the git gutter.
|
||||
///
|
||||
/// Default: 0
|
||||
"gutter_debounce": 0,
|
||||
// Control whether the git blame information is shown inline,
|
||||
// in the currently focused line.
|
||||
"inline_blame": {
|
||||
@@ -1259,13 +1187,6 @@
|
||||
// The minimum column number to show the inline blame information at
|
||||
"min_column": 0
|
||||
},
|
||||
"blame": {
|
||||
"show_avatar": true
|
||||
},
|
||||
// Control which information is shown in the branch picker.
|
||||
"branch_picker": {
|
||||
"show_author_name": true
|
||||
},
|
||||
// How git hunks are displayed visually in the editor.
|
||||
// This setting can take two values:
|
||||
//
|
||||
@@ -1320,15 +1241,6 @@
|
||||
// "proxy": "",
|
||||
// "proxy_no_verify": false
|
||||
// },
|
||||
"copilot": {
|
||||
"enterprise_uri": null,
|
||||
"proxy": null,
|
||||
"proxy_no_verify": null
|
||||
},
|
||||
"codestral": {
|
||||
"model": null,
|
||||
"max_tokens": null
|
||||
},
|
||||
// Whether edit predictions are enabled when editing text threads.
|
||||
// This setting has no effect if globally disabled.
|
||||
"enabled_in_text_threads": true
|
||||
@@ -1345,8 +1257,6 @@
|
||||
},
|
||||
// Status bar-related settings.
|
||||
"status_bar": {
|
||||
// Whether to show the status bar.
|
||||
"experimental.show": true,
|
||||
// Whether to show the active language button in the status bar.
|
||||
"active_language_button": true,
|
||||
// Whether to show the cursor position button in the status bar.
|
||||
@@ -1413,8 +1323,8 @@
|
||||
// 4. A box drawn around the following character
|
||||
// "hollow"
|
||||
//
|
||||
// Default: "block"
|
||||
"cursor_shape": "block",
|
||||
// Default: not set, defaults to "block"
|
||||
"cursor_shape": null,
|
||||
// Set whether Alternate Scroll mode (code: ?1007) is active by default.
|
||||
// Alternate Scroll mode converts mouse scroll events into up / down key
|
||||
// presses when in the alternate screen (e.g. when running applications
|
||||
@@ -1436,8 +1346,8 @@
|
||||
// Whether or not selecting text in the terminal will automatically
|
||||
// copy to the system clipboard.
|
||||
"copy_on_select": false,
|
||||
// Whether to keep the text selection after copying it to the clipboard.
|
||||
"keep_selection_on_copy": true,
|
||||
// Whether to keep the text selection after copying it to the clipboard
|
||||
"keep_selection_on_copy": false,
|
||||
// Whether to show the terminal button in the status bar
|
||||
"button": true,
|
||||
// Any key-value pairs added to this list will be added to the terminal's
|
||||
@@ -1456,7 +1366,7 @@
|
||||
// "line_height": {
|
||||
// "custom": 2
|
||||
// },
|
||||
"line_height": "standard",
|
||||
"line_height": "comfortable",
|
||||
// Activate the python virtual environment, if one is found, in the
|
||||
// terminal's working directory (as resolved by the working_directory
|
||||
// setting). Set this to "off" to disable this behavior.
|
||||
@@ -1476,7 +1386,7 @@
|
||||
//
|
||||
// The shell running in the terminal needs to be configured to emit the title.
|
||||
// Example: `echo -e "\e]2;New Title\007";`
|
||||
"breadcrumbs": false
|
||||
"breadcrumbs": true
|
||||
},
|
||||
// Scrollbar-related settings
|
||||
"scrollbar": {
|
||||
@@ -1527,6 +1437,7 @@
|
||||
// A value of 45 preserves colorful themes while ensuring legibility.
|
||||
"minimum_contrast": 45
|
||||
},
|
||||
"code_actions_on_format": {},
|
||||
// Settings related to running tasks.
|
||||
"tasks": {
|
||||
"variables": {},
|
||||
@@ -1555,7 +1466,7 @@
|
||||
// }
|
||||
//
|
||||
"file_types": {
|
||||
"JSONC": ["**/.zed/**/*.json", "**/zed/**/*.json", "**/Zed/**/*.json", "**/.vscode/**/*.json", "tsconfig*.json"],
|
||||
"JSONC": ["**/.zed/**/*.json", "**/zed/**/*.json", "**/Zed/**/*.json", "**/.vscode/**/*.json"],
|
||||
"Shell Script": [".env.*"]
|
||||
},
|
||||
// Settings for which version of Node.js and NPM to use when installing
|
||||
@@ -1581,14 +1492,6 @@
|
||||
"auto_install_extensions": {
|
||||
"html": true
|
||||
},
|
||||
// The capabilities granted to extensions.
|
||||
//
|
||||
// This list can be customized to restrict what extensions are able to do.
|
||||
"granted_extension_capabilities": [
|
||||
{ "kind": "process:exec", "command": "*", "args": ["**"] },
|
||||
{ "kind": "download_file", "host": "*", "path": ["**"] },
|
||||
{ "kind": "npm:install", "package": "*" }
|
||||
],
|
||||
// Controls how completions are processed for this language.
|
||||
"completions": {
|
||||
// Controls how words are completed.
|
||||
@@ -1682,7 +1585,7 @@
|
||||
"ensure_final_newline_on_save": false
|
||||
},
|
||||
"Elixir": {
|
||||
"language_servers": ["elixir-ls", "!expert", "!next-ls", "!lexical", "..."]
|
||||
"language_servers": ["elixir-ls", "!next-ls", "!lexical", "..."]
|
||||
},
|
||||
"Elm": {
|
||||
"tab_size": 4
|
||||
@@ -1696,7 +1599,9 @@
|
||||
"preferred_line_length": 72
|
||||
},
|
||||
"Go": {
|
||||
"formatter": [{ "code_action": "source.organizeImports" }, "language_server"],
|
||||
"code_actions_on_format": {
|
||||
"source.organizeImports": true
|
||||
},
|
||||
"debuggers": ["Delve"]
|
||||
},
|
||||
"GraphQL": {
|
||||
@@ -1705,7 +1610,7 @@
|
||||
}
|
||||
},
|
||||
"HEEX": {
|
||||
"language_servers": ["elixir-ls", "!expert", "!next-ls", "!lexical", "..."]
|
||||
"language_servers": ["elixir-ls", "!next-ls", "!lexical", "..."]
|
||||
},
|
||||
"HTML": {
|
||||
"prettier": {
|
||||
@@ -1766,11 +1671,6 @@
|
||||
"allow_rewrap": "anywhere"
|
||||
},
|
||||
"Python": {
|
||||
"formatter": {
|
||||
"language_server": {
|
||||
"name": "ruff"
|
||||
}
|
||||
},
|
||||
"debuggers": ["Debugpy"]
|
||||
},
|
||||
"Ruby": {
|
||||
@@ -1841,7 +1741,6 @@
|
||||
"anthropic": {
|
||||
"api_url": "https://api.anthropic.com"
|
||||
},
|
||||
"bedrock": {},
|
||||
"google": {
|
||||
"api_url": "https://generativelanguage.googleapis.com"
|
||||
},
|
||||
@@ -1859,45 +1758,31 @@
|
||||
"api_url": "http://localhost:1234/api/v0"
|
||||
},
|
||||
"deepseek": {
|
||||
"api_url": "https://api.deepseek.com/v1"
|
||||
"api_url": "https://api.deepseek.com"
|
||||
},
|
||||
"mistral": {
|
||||
"api_url": "https://api.mistral.ai/v1"
|
||||
},
|
||||
"vercel": {
|
||||
"api_url": "https://api.v0.dev/v1"
|
||||
},
|
||||
"x_ai": {
|
||||
"api_url": "https://api.x.ai/v1"
|
||||
},
|
||||
"zed.dev": {}
|
||||
},
|
||||
"session": {
|
||||
// Whether or not to restore unsaved buffers on restart.
|
||||
//
|
||||
// If this is true, user won't be prompted whether to save/discard
|
||||
// dirty files when closing the application.
|
||||
//
|
||||
// Default: true
|
||||
"restore_unsaved_buffers": true
|
||||
}
|
||||
},
|
||||
// Zed's Prettier integration settings.
|
||||
// Allows enabling or disabling formatting with Prettier
|
||||
// and configure default Prettier, used when no project-level Prettier installation is found.
|
||||
"prettier": {
|
||||
// Enables or disables formatting with Prettier for any given language.
|
||||
"allowed": false,
|
||||
// Forces Prettier integration to use a specific parser name when formatting files with the language.
|
||||
"plugins": [],
|
||||
// Default Prettier options, in the format as in package.json section for Prettier.
|
||||
// If project installs Prettier via its package.json, these options will be ignored.
|
||||
// // Whether to consider prettier formatter or not when attempting to format a file.
|
||||
// "allowed": false,
|
||||
//
|
||||
// // Use regular Prettier json configuration.
|
||||
// // If Prettier is allowed, Zed will use this for its Prettier instance for any applicable file, if
|
||||
// // the project has no other Prettier installed.
|
||||
// "plugins": [],
|
||||
//
|
||||
// // Use regular Prettier json configuration.
|
||||
// // If Prettier is allowed, Zed will use this for its Prettier instance for any applicable file, if
|
||||
// // the project has no other Prettier installed.
|
||||
// "trailingComma": "es5",
|
||||
// "tabWidth": 4,
|
||||
// "semi": false,
|
||||
// "singleQuote": true
|
||||
// Forces Prettier integration to use a specific parser name when formatting files with the language
|
||||
// when set to a non-empty string.
|
||||
"parser": ""
|
||||
},
|
||||
// Settings for auto-closing of JSX tags.
|
||||
"jsx_tag_auto_close": {
|
||||
@@ -1917,10 +1802,6 @@
|
||||
// }
|
||||
// }
|
||||
},
|
||||
// DAP Specific settings.
|
||||
"dap": {
|
||||
// Specify the DAP name as a key here.
|
||||
},
|
||||
// Common language server settings.
|
||||
"global_lsp_settings": {
|
||||
// Whether to show the LSP servers button in the status bar.
|
||||
@@ -1928,23 +1809,13 @@
|
||||
},
|
||||
// Jupyter settings
|
||||
"jupyter": {
|
||||
"enabled": true,
|
||||
"kernel_selections": {}
|
||||
"enabled": true
|
||||
// Specify the language name as the key and the kernel name as the value.
|
||||
// "kernel_selections": {
|
||||
// "python": "conda-base"
|
||||
// "typescript": "deno"
|
||||
// }
|
||||
},
|
||||
// REPL settings.
|
||||
"repl": {
|
||||
// Maximum number of columns to keep in REPL's scrollback buffer.
|
||||
// Clamped with [20, 512] range.
|
||||
"max_columns": 128,
|
||||
// Maximum number of lines to keep in REPL's scrollback buffer.
|
||||
// Clamped with [4, 256] range.
|
||||
"max_lines": 32
|
||||
},
|
||||
// Vim settings
|
||||
"vim": {
|
||||
"default_mode": "normal",
|
||||
@@ -2035,10 +1906,7 @@
|
||||
"debugger": {
|
||||
"stepping_granularity": "line",
|
||||
"save_breakpoints": true,
|
||||
"timeout": 2000,
|
||||
"dock": "bottom",
|
||||
"log_dap_communications": true,
|
||||
"format_dap_log_messages": true,
|
||||
"button": true
|
||||
},
|
||||
// Configures any number of settings profiles that are temporarily applied on
|
||||
@@ -2047,7 +1915,7 @@
|
||||
// Examples:
|
||||
// "profiles": {
|
||||
// "Presenting": {
|
||||
// "agent_ui_font_size": 20.0,
|
||||
// "agent_font_size": 20.0,
|
||||
// "buffer_font_size": 20.0,
|
||||
// "theme": "One Light",
|
||||
// "ui_font_size": 20.0
|
||||
@@ -2060,11 +1928,5 @@
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
"profiles": {},
|
||||
|
||||
// A map of log scopes to the desired log level.
|
||||
// Useful for filtering out noisy logs or enabling more verbose logging.
|
||||
//
|
||||
// Example: {"log": {"client": "warn"}}
|
||||
"log": {}
|
||||
"profiles": []
|
||||
}
|
||||
|
||||
@@ -43,11 +43,7 @@
|
||||
// "args": ["--login"]
|
||||
// }
|
||||
// }
|
||||
"shell": "system",
|
||||
// Whether to show the task line in the output of the spawned task, defaults to `true`.
|
||||
"show_summary": true,
|
||||
// Whether to show the command line in the output of the spawned task, defaults to `true`.
|
||||
"show_command": true
|
||||
"shell": "system"
|
||||
// Represents the tags for inline runnable indicators, or spawning multiple tasks at once.
|
||||
// "tags": []
|
||||
}
|
||||
|
||||
@@ -192,7 +192,7 @@
|
||||
"font_weight": null
|
||||
},
|
||||
"comment": {
|
||||
"color": "#5c6773ff",
|
||||
"color": "#abb5be8c",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
@@ -239,7 +239,7 @@
|
||||
"hint": {
|
||||
"color": "#628b80ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#ff8f3fff",
|
||||
@@ -316,11 +316,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#a6a5a0ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#d2a6ffff",
|
||||
"font_style": null,
|
||||
@@ -583,7 +578,7 @@
|
||||
"font_weight": null
|
||||
},
|
||||
"comment": {
|
||||
"color": "#abb0b6ff",
|
||||
"color": "#787b8099",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
@@ -630,7 +625,7 @@
|
||||
"hint": {
|
||||
"color": "#8ca7c2ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#fa8d3eff",
|
||||
@@ -707,11 +702,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#73777bff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#a37accff",
|
||||
"font_style": null,
|
||||
@@ -974,7 +964,7 @@
|
||||
"font_weight": null
|
||||
},
|
||||
"comment": {
|
||||
"color": "#5c6773ff",
|
||||
"color": "#b8cfe680",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
@@ -1021,7 +1011,7 @@
|
||||
"hint": {
|
||||
"color": "#7399a3ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#ffad65ff",
|
||||
@@ -1098,11 +1088,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#b4b3aeff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#dfbfffff",
|
||||
"font_style": null,
|
||||
|
||||
@@ -248,7 +248,7 @@
|
||||
"hint": {
|
||||
"color": "#8c957dff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#fb4833ff",
|
||||
@@ -325,11 +325,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#83a598ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#e5d5adff",
|
||||
"font_style": null,
|
||||
@@ -653,7 +648,7 @@
|
||||
"hint": {
|
||||
"color": "#8c957dff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#fb4833ff",
|
||||
@@ -730,11 +725,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#83a598ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#e5d5adff",
|
||||
"font_style": null,
|
||||
@@ -1058,7 +1048,7 @@
|
||||
"hint": {
|
||||
"color": "#8c957dff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#fb4833ff",
|
||||
@@ -1135,11 +1125,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#83a598ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#e5d5adff",
|
||||
"font_style": null,
|
||||
@@ -1463,7 +1448,7 @@
|
||||
"hint": {
|
||||
"color": "#677562ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#9d0006ff",
|
||||
@@ -1540,11 +1525,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#066578ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#413d3aff",
|
||||
"font_style": null,
|
||||
@@ -1868,7 +1848,7 @@
|
||||
"hint": {
|
||||
"color": "#677562ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#9d0006ff",
|
||||
@@ -1945,11 +1925,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#066578ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#413d3aff",
|
||||
"font_style": null,
|
||||
@@ -2273,7 +2248,7 @@
|
||||
"hint": {
|
||||
"color": "#677562ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#9d0006ff",
|
||||
@@ -2350,11 +2325,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#066578ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#413d3aff",
|
||||
"font_style": null,
|
||||
|
||||
@@ -244,7 +244,7 @@
|
||||
"hint": {
|
||||
"color": "#788ca6ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#b477cfff",
|
||||
@@ -321,11 +321,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#d07277ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#b1574bff",
|
||||
"font_style": null,
|
||||
@@ -643,7 +638,7 @@
|
||||
"hint": {
|
||||
"color": "#7274a7ff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
"font_weight": 700
|
||||
},
|
||||
"keyword": {
|
||||
"color": "#a449abff",
|
||||
@@ -720,11 +715,6 @@
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.markup": {
|
||||
"color": "#d3604fff",
|
||||
"font_style": null,
|
||||
"font_weight": null
|
||||
},
|
||||
"punctuation.special": {
|
||||
"color": "#b92b46ff",
|
||||
"font_style": null,
|
||||
|
||||
clippy.toml
@@ -5,16 +5,3 @@ ignore-interior-mutability = [
|
||||
# and Hash impls do not use fields with interior mutability.
|
||||
"agent::context::AgentContextKey"
|
||||
]
|
||||
disallowed-methods = [
|
||||
{ path = "std::process::Command::spawn", reason = "Spawning `std::process::Command` can block the current thread for an unknown duration", replacement = "smol::process::Command::spawn" },
|
||||
{ path = "std::process::Command::output", reason = "Spawning `std::process::Command` can block the current thread for an unknown duration", replacement = "smol::process::Command::output" },
|
||||
{ path = "std::process::Command::status", reason = "Spawning `std::process::Command` can block the current thread for an unknown duration", replacement = "smol::process::Command::status" },
|
||||
{ path = "serde_json::from_reader", reason = "Parsing from a buffer is much slower than first reading the buffer into a Vec/String, see https://github.com/serde-rs/json/issues/160#issuecomment-253446892. Use `serde_json::from_slice` instead." },
|
||||
{ path = "serde_json_lenient::from_reader", reason = "Parsing from a buffer is much slower than first reading the buffer into a Vec/String, see https://github.com/serde-rs/json/issues/160#issuecomment-253446892, Use `serde_json_lenient::from_slice` instead." },
|
||||
]
|
||||
disallowed-types = [
|
||||
# { path = "std::collections::HashMap", replacement = "collections::HashMap" },
|
||||
# { path = "std::collections::HashSet", replacement = "collections::HashSet" },
|
||||
# { path = "indexmap::IndexSet", replacement = "collections::IndexSet" },
|
||||
# { path = "indexmap::IndexMap", replacement = "collections::IndexMap" },
|
||||
]
|
||||
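The rationale strings in these `disallowed-methods` entries point at async-friendly replacements. A minimal sketch of what the suggested calls look like in practice, assuming the `smol`, `serde_json`, and `anyhow` crates are available (the command and JSON field used here are only illustrative):

```rust
use anyhow::Result;

// Sketch only: spawn a child process without blocking the current thread, then
// parse its stdout with `from_slice` instead of `from_reader`.
async fn cargo_metadata_json() -> Result<serde_json::Value> {
    // `smol::process::Command` awaits the child on the executor, unlike
    // `std::process::Command`, which blocks for an unknown duration.
    let output = smol::process::Command::new("cargo")
        .args(["metadata", "--format-version", "1"])
        .output()
        .await?;

    // Read the bytes first, then parse; `serde_json::from_reader` is much slower
    // because it pulls from the reader byte by byte.
    Ok(serde_json::from_slice(&output.stdout)?)
}

fn main() -> Result<()> {
    let metadata = smol::block_on(cargo_metadata_json())?;
    println!("workspace root: {}", metadata["workspace_root"]);
    Ok(())
}
```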
10 compose.yml
@@ -1,6 +1,6 @@
services:
postgres:
image: docker.io/library/postgres:15
image: postgres:15
container_name: zed_postgres
ports:
- 5432:5432
@@ -23,7 +23,7 @@ services:
- ./.blob_store:/data

livekit_server:
image: docker.io/livekit/livekit-server
image: livekit/livekit-server
container_name: livekit_server
entrypoint: /livekit-server --config /livekit.yaml
ports:
@@ -34,7 +34,7 @@ services:
- ./livekit.yaml:/livekit.yaml

postgrest_app:
image: docker.io/postgrest/postgrest
image: postgrest/postgrest
container_name: postgrest_app
ports:
- 8081:8081
@@ -47,7 +47,7 @@ services:
- postgres

postgrest_llm:
image: docker.io/postgrest/postgrest
image: postgrest/postgrest
container_name: postgrest_llm
ports:
- 8082:8082
@@ -60,7 +60,7 @@ services:
- postgres

stripe-mock:
image: docker.io/stripe/stripe-mock:v0.178.0
image: stripe/stripe-mock:v0.178.0
ports:
- 12111:12111
- 12112:12112
@@ -18,7 +18,6 @@ test-support = ["gpui/test-support", "project/test-support", "dep:parking_lot"]
[dependencies]
action_log.workspace = true
agent-client-protocol.workspace = true
agent_settings.workspace = true
anyhow.workspace = true
buffer_diff.workspace = true
collections.workspace = true
@@ -31,14 +30,12 @@ language.workspace = true
language_model.workspace = true
markdown.workspace = true
parking_lot = { workspace = true, optional = true }
portable-pty.workspace = true
project.workspace = true
prompt_store.workspace = true
serde.workspace = true
serde_json.workspace = true
settings.workspace = true
smol.workspace = true
task.workspace = true
terminal.workspace = true
ui.workspace = true
url.workspace = true
@@ -68,7 +68,7 @@ pub trait AgentConnection {
|
||||
///
|
||||
/// If the agent does not support model selection, returns [None].
|
||||
/// This allows sharing the selector in UI components.
|
||||
fn model_selector(&self, _session_id: &acp::SessionId) -> Option<Rc<dyn AgentModelSelector>> {
|
||||
fn model_selector(&self) -> Option<Rc<dyn AgentModelSelector>> {
|
||||
None
|
||||
}
|
||||
|
||||
@@ -76,13 +76,8 @@ pub trait AgentConnection {
|
||||
None
|
||||
}
|
||||
|
||||
fn session_modes(
|
||||
&self,
|
||||
_session_id: &acp::SessionId,
|
||||
_cx: &App,
|
||||
) -> Option<Rc<dyn AgentSessionModes>> {
|
||||
None
|
||||
}
|
||||
fn list_commands(&self, session_id: &acp::SessionId, cx: &mut App) -> Task<Result<acp::ListCommandsResponse>>;
|
||||
fn run_command(&self, request: acp::RunCommandRequest, cx: &mut App) -> Task<Result<()>>;
|
||||
|
||||
fn into_any(self: Rc<Self>) -> Rc<dyn Any>;
|
||||
}
|
||||
@@ -118,14 +113,6 @@ pub trait AgentTelemetry {
|
||||
) -> Task<Result<serde_json::Value>>;
|
||||
}
|
||||
|
||||
pub trait AgentSessionModes {
|
||||
fn current_mode(&self) -> acp::SessionModeId;
|
||||
|
||||
fn all_modes(&self) -> Vec<acp::SessionMode>;
|
||||
|
||||
fn set_mode(&self, mode: acp::SessionModeId, cx: &mut App) -> Task<Result<()>>;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct AuthRequired {
|
||||
pub description: Option<String>,
|
||||
@@ -177,48 +164,61 @@ pub trait AgentModelSelector: 'static {
|
||||
/// If the session doesn't exist or the model is invalid, it returns an error.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `session_id`: The ID of the session (thread) to apply the model to.
|
||||
/// - `model`: The model to select (should be one from [list_models]).
|
||||
/// - `cx`: The GPUI app context.
|
||||
///
|
||||
/// # Returns
|
||||
/// A task resolving to `Ok(())` on success or an error.
|
||||
fn select_model(&self, model_id: acp::ModelId, cx: &mut App) -> Task<Result<()>>;
|
||||
fn select_model(
|
||||
&self,
|
||||
session_id: acp::SessionId,
|
||||
model_id: AgentModelId,
|
||||
cx: &mut App,
|
||||
) -> Task<Result<()>>;
|
||||
|
||||
/// Retrieves the currently selected model for a specific session (thread).
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `session_id`: The ID of the session (thread) to query.
|
||||
/// - `cx`: The GPUI app context.
|
||||
///
|
||||
/// # Returns
|
||||
/// A task resolving to the selected model (always set) or an error (e.g., session not found).
|
||||
fn selected_model(&self, cx: &mut App) -> Task<Result<AgentModelInfo>>;
|
||||
fn selected_model(
|
||||
&self,
|
||||
session_id: &acp::SessionId,
|
||||
cx: &mut App,
|
||||
) -> Task<Result<AgentModelInfo>>;
|
||||
|
||||
/// Whenever the model list is updated the receiver will be notified.
|
||||
/// Optional for agents that don't update their model list.
|
||||
fn watch(&self, _cx: &mut App) -> Option<watch::Receiver<()>> {
|
||||
None
|
||||
fn watch(&self, cx: &mut App) -> watch::Receiver<()>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct AgentModelId(pub SharedString);
|
||||
|
||||
impl std::ops::Deref for AgentModelId {
|
||||
type Target = SharedString;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for AgentModelId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
||||
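Since `AgentModelId` derefs to `SharedString` (which itself derefs to `str`) and forwards `Display`, it can be handled like a plain string identifier. A small illustrative snippet, assuming the definitions above and `gpui::SharedString`; the model id string is made up:

```rust
#[test]
fn agent_model_id_behaves_like_a_string() {
    use gpui::SharedString;

    // Illustrative only; relies on the AgentModelId newtype defined above.
    let id = AgentModelId(SharedString::from("claude-3-7-sonnet"));

    // Deref chains AgentModelId -> SharedString -> str, so &str methods apply directly.
    assert!(id.starts_with("claude"));

    // Display forwards to the inner SharedString.
    assert_eq!(format!("model: {id}"), "model: claude-3-7-sonnet");
}
```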
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct AgentModelInfo {
|
||||
pub id: acp::ModelId,
|
||||
pub id: AgentModelId,
|
||||
pub name: SharedString,
|
||||
pub description: Option<SharedString>,
|
||||
pub icon: Option<IconName>,
|
||||
}
|
||||
|
||||
impl From<acp::ModelInfo> for AgentModelInfo {
|
||||
fn from(info: acp::ModelInfo) -> Self {
|
||||
Self {
|
||||
id: info.model_id,
|
||||
name: info.name.into(),
|
||||
description: info.description.map(|desc| desc.into()),
|
||||
icon: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct AgentModelGroupName(pub SharedString);
|
||||
|
||||
@@ -341,7 +341,7 @@ mod test_support {
|
||||
image: true,
|
||||
audio: true,
|
||||
embedded_context: true,
|
||||
meta: None,
|
||||
supports_custom_commands: false,
|
||||
}),
|
||||
cx,
|
||||
)
|
||||
@@ -381,10 +381,7 @@ mod test_support {
|
||||
response_tx.replace(tx);
|
||||
cx.spawn(async move |_| {
|
||||
let stop_reason = rx.await?;
|
||||
Ok(acp::PromptResponse {
|
||||
stop_reason,
|
||||
meta: None,
|
||||
})
|
||||
Ok(acp::PromptResponse { stop_reason })
|
||||
})
|
||||
} else {
|
||||
for update in self.next_prompt_updates.lock().drain(..) {
|
||||
@@ -400,16 +397,14 @@ mod test_support {
|
||||
};
|
||||
let task = cx.spawn(async move |cx| {
|
||||
if let Some((tool_call, options)) = permission_request {
|
||||
thread
|
||||
.update(cx, |thread, cx| {
|
||||
thread.request_tool_call_authorization(
|
||||
tool_call.clone().into(),
|
||||
options.clone(),
|
||||
false,
|
||||
cx,
|
||||
)
|
||||
})??
|
||||
.await;
|
||||
let permission = thread.update(cx, |thread, cx| {
|
||||
thread.request_tool_call_authorization(
|
||||
tool_call.clone().into(),
|
||||
options.clone(),
|
||||
cx,
|
||||
)
|
||||
})?;
|
||||
permission?.await?;
|
||||
}
|
||||
thread.update(cx, |thread, cx| {
|
||||
thread.handle_session_update(update.clone(), cx).unwrap();
|
||||
@@ -423,7 +418,6 @@ mod test_support {
|
||||
try_join_all(tasks).await?;
|
||||
Ok(acp::PromptResponse {
|
||||
stop_reason: acp::StopReason::EndTurn,
|
||||
meta: None,
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -450,6 +444,14 @@ mod test_support {
|
||||
Some(Rc::new(StubAgentSessionEditor))
|
||||
}
|
||||
|
||||
fn list_commands(&self, _session_id: &acp::SessionId, _cx: &mut App) -> Task<Result<acp::ListCommandsResponse>> {
|
||||
Task::ready(Ok(acp::ListCommandsResponse { commands: vec![] }))
|
||||
}
|
||||
|
||||
fn run_command(&self, _request: acp::RunCommandRequest, _cx: &mut App) -> Task<Result<()>> {
|
||||
Task::ready(Ok(()))
|
||||
}
|
||||
|
||||
fn into_any(self: Rc<Self>) -> Rc<dyn Any> {
|
||||
self
|
||||
}
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
use anyhow::Result;
|
||||
use buffer_diff::{BufferDiff, BufferDiffSnapshot};
|
||||
use editor::{MultiBuffer, PathKey, multibuffer_context_lines};
|
||||
use editor::{MultiBuffer, PathKey};
|
||||
use gpui::{App, AppContext, AsyncApp, Context, Entity, Subscription, Task};
|
||||
use itertools::Itertools;
|
||||
use language::{
|
||||
Anchor, Buffer, Capability, LanguageRegistry, OffsetRangeExt as _, Point, Rope, TextBuffer,
|
||||
};
|
||||
use std::{cmp::Reverse, ops::Range, path::Path, sync::Arc};
|
||||
use std::{
|
||||
cmp::Reverse,
|
||||
ops::Range,
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
};
|
||||
use util::ResultExt;
|
||||
|
||||
pub enum Diff {
|
||||
@@ -16,7 +21,7 @@ pub enum Diff {
|
||||
|
||||
impl Diff {
|
||||
pub fn finalized(
|
||||
path: String,
|
||||
path: PathBuf,
|
||||
old_text: Option<String>,
|
||||
new_text: String,
|
||||
language_registry: Arc<LanguageRegistry>,
|
||||
@@ -31,7 +36,7 @@ impl Diff {
|
||||
let buffer = new_buffer.clone();
|
||||
async move |_, cx| {
|
||||
let language = language_registry
|
||||
.load_language_for_file_path(Path::new(&path))
|
||||
.language_for_file_path(&path)
|
||||
.await
|
||||
.log_err();
|
||||
|
||||
@@ -59,7 +64,7 @@ impl Diff {
|
||||
PathKey::for_buffer(&buffer, cx),
|
||||
buffer.clone(),
|
||||
hunk_ranges,
|
||||
multibuffer_context_lines(cx),
|
||||
editor::DEFAULT_MULTIBUFFER_CONTEXT,
|
||||
cx,
|
||||
);
|
||||
multibuffer.add_diff(diff, cx);
|
||||
@@ -147,15 +152,12 @@ impl Diff {
|
||||
let path = match self {
|
||||
Diff::Pending(PendingDiff {
|
||||
new_buffer: buffer, ..
|
||||
}) => buffer
|
||||
.read(cx)
|
||||
.file()
|
||||
.map(|file| file.path().display(file.path_style(cx))),
|
||||
Diff::Finalized(FinalizedDiff { path, .. }) => Some(path.as_str().into()),
|
||||
}) => buffer.read(cx).file().map(|file| file.path().as_ref()),
|
||||
Diff::Finalized(FinalizedDiff { path, .. }) => Some(path.as_path()),
|
||||
};
|
||||
format!(
|
||||
"Diff: {}\n```\n{}\n```\n",
|
||||
path.unwrap_or("untitled".into()),
|
||||
path.unwrap_or(Path::new("untitled")).display(),
|
||||
buffer_text
|
||||
)
|
||||
}
|
||||
@@ -242,8 +244,8 @@ impl PendingDiff {
|
||||
.new_buffer
|
||||
.read(cx)
|
||||
.file()
|
||||
.map(|file| file.path().display(file.path_style(cx)))
|
||||
.unwrap_or("untitled".into())
|
||||
.map(|file| file.path().as_ref())
|
||||
.unwrap_or(Path::new("untitled"))
|
||||
.into();
|
||||
|
||||
// Replace the buffer in the multibuffer with the snapshot
|
||||
@@ -277,7 +279,7 @@ impl PendingDiff {
|
||||
path_key,
|
||||
buffer,
|
||||
ranges,
|
||||
multibuffer_context_lines(cx),
|
||||
editor::DEFAULT_MULTIBUFFER_CONTEXT,
|
||||
cx,
|
||||
);
|
||||
multibuffer.add_diff(buffer_diff.clone(), cx);
|
||||
@@ -303,7 +305,7 @@ impl PendingDiff {
|
||||
PathKey::for_buffer(&self.new_buffer, cx),
|
||||
self.new_buffer.clone(),
|
||||
ranges,
|
||||
multibuffer_context_lines(cx),
|
||||
editor::DEFAULT_MULTIBUFFER_CONTEXT,
|
||||
cx,
|
||||
);
|
||||
let end = multibuffer.len(cx);
|
||||
@@ -346,7 +348,7 @@ impl PendingDiff {
|
||||
}
|
||||
|
||||
pub struct FinalizedDiff {
|
||||
path: String,
|
||||
path: PathBuf,
|
||||
base_text: Arc<String>,
|
||||
new_buffer: Entity<Buffer>,
|
||||
multibuffer: Entity<MultiBuffer>,
|
||||
|
||||
@@ -126,39 +126,6 @@ impl MentionUri {
|
||||
abs_path: None,
|
||||
line_range,
|
||||
})
|
||||
} else if let Some(name) = path.strip_prefix("/agent/symbol/") {
|
||||
let fragment = url
|
||||
.fragment()
|
||||
.context("Missing fragment for untitled buffer selection")?;
|
||||
let line_range = parse_line_range(fragment)?;
|
||||
let path =
|
||||
single_query_param(&url, "path")?.context("Missing path for symbol")?;
|
||||
Ok(Self::Symbol {
|
||||
name: name.to_string(),
|
||||
abs_path: path.into(),
|
||||
line_range,
|
||||
})
|
||||
} else if path.starts_with("/agent/file") {
|
||||
let path =
|
||||
single_query_param(&url, "path")?.context("Missing path for file")?;
|
||||
Ok(Self::File {
|
||||
abs_path: path.into(),
|
||||
})
|
||||
} else if path.starts_with("/agent/directory") {
|
||||
let path =
|
||||
single_query_param(&url, "path")?.context("Missing path for directory")?;
|
||||
Ok(Self::Directory {
|
||||
abs_path: path.into(),
|
||||
})
|
||||
} else if path.starts_with("/agent/selection") {
|
||||
let fragment = url.fragment().context("Missing fragment for selection")?;
|
||||
let line_range = parse_line_range(fragment)?;
|
||||
let path =
|
||||
single_query_param(&url, "path")?.context("Missing path for selection")?;
|
||||
Ok(Self::Selection {
|
||||
abs_path: Some(path.into()),
|
||||
line_range,
|
||||
})
|
||||
} else {
|
||||
bail!("invalid zed url: {:?}", input);
|
||||
}
|
||||
@@ -195,7 +162,7 @@ impl MentionUri {
|
||||
FileIcons::get_icon(abs_path, cx).unwrap_or_else(|| IconName::File.path().into())
|
||||
}
|
||||
MentionUri::PastedImage => IconName::Image.path().into(),
|
||||
MentionUri::Directory { abs_path } => FileIcons::get_folder_icon(false, abs_path, cx)
|
||||
MentionUri::Directory { .. } => FileIcons::get_folder_icon(false, cx)
|
||||
.unwrap_or_else(|| IconName::Folder.path().into()),
|
||||
MentionUri::Symbol { .. } => IconName::Code.path().into(),
|
||||
MentionUri::Thread { .. } => IconName::Thread.path().into(),
|
||||
@@ -213,29 +180,20 @@ impl MentionUri {
|
||||
pub fn to_uri(&self) -> Url {
|
||||
match self {
|
||||
MentionUri::File { abs_path } => {
|
||||
let mut url = Url::parse("zed:///").unwrap();
|
||||
url.set_path("/agent/file");
|
||||
url.query_pairs_mut()
|
||||
.append_pair("path", &abs_path.to_string_lossy());
|
||||
url
|
||||
Url::from_file_path(abs_path).expect("mention path should be absolute")
|
||||
}
|
||||
MentionUri::PastedImage => Url::parse("zed:///agent/pasted-image").unwrap(),
|
||||
MentionUri::Directory { abs_path } => {
|
||||
let mut url = Url::parse("zed:///").unwrap();
|
||||
url.set_path("/agent/directory");
|
||||
url.query_pairs_mut()
|
||||
.append_pair("path", &abs_path.to_string_lossy());
|
||||
url
|
||||
Url::from_directory_path(abs_path).expect("mention path should be absolute")
|
||||
}
|
||||
MentionUri::Symbol {
|
||||
abs_path,
|
||||
name,
|
||||
line_range,
|
||||
} => {
|
||||
let mut url = Url::parse("zed:///").unwrap();
|
||||
url.set_path(&format!("/agent/symbol/{name}"));
|
||||
url.query_pairs_mut()
|
||||
.append_pair("path", &abs_path.to_string_lossy());
|
||||
let mut url =
|
||||
Url::from_file_path(abs_path).expect("mention path should be absolute");
|
||||
url.query_pairs_mut().append_pair("symbol", name);
|
||||
url.set_fragment(Some(&format!(
|
||||
"L{}:{}",
|
||||
line_range.start() + 1,
|
||||
@@ -244,16 +202,15 @@ impl MentionUri {
|
||||
url
|
||||
}
|
||||
MentionUri::Selection {
|
||||
abs_path,
|
||||
abs_path: path,
|
||||
line_range,
|
||||
} => {
|
||||
let mut url = Url::parse("zed:///").unwrap();
|
||||
if let Some(abs_path) = abs_path {
|
||||
url.set_path("/agent/selection");
|
||||
url.query_pairs_mut()
|
||||
.append_pair("path", &abs_path.to_string_lossy());
|
||||
let mut url = if let Some(path) = path {
|
||||
Url::from_file_path(path).expect("mention path should be absolute")
|
||||
} else {
|
||||
let mut url = Url::parse("zed:///").unwrap();
|
||||
url.set_path("/agent/untitled-buffer");
|
||||
url
|
||||
};
|
||||
url.set_fragment(Some(&format!(
|
||||
"L{}:{}",
|
||||
@@ -338,32 +295,37 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_parse_file_uri() {
|
||||
let old_uri = uri!("file:///path/to/file.rs");
|
||||
let parsed = MentionUri::parse(old_uri).unwrap();
|
||||
let file_uri = uri!("file:///path/to/file.rs");
|
||||
let parsed = MentionUri::parse(file_uri).unwrap();
|
||||
match &parsed {
|
||||
MentionUri::File { abs_path } => {
|
||||
assert_eq!(abs_path.to_str().unwrap(), path!("/path/to/file.rs"));
|
||||
}
|
||||
_ => panic!("Expected File variant"),
|
||||
}
|
||||
let new_uri = parsed.to_uri().to_string();
|
||||
assert!(new_uri.starts_with("zed:///agent/file"));
|
||||
assert_eq!(MentionUri::parse(&new_uri).unwrap(), parsed);
|
||||
assert_eq!(parsed.to_uri().to_string(), file_uri);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_directory_uri() {
|
||||
let old_uri = uri!("file:///path/to/dir/");
|
||||
let parsed = MentionUri::parse(old_uri).unwrap();
|
||||
let file_uri = uri!("file:///path/to/dir/");
|
||||
let parsed = MentionUri::parse(file_uri).unwrap();
|
||||
match &parsed {
|
||||
MentionUri::Directory { abs_path } => {
|
||||
assert_eq!(abs_path.to_str().unwrap(), path!("/path/to/dir/"));
|
||||
}
|
||||
_ => panic!("Expected Directory variant"),
|
||||
}
|
||||
let new_uri = parsed.to_uri().to_string();
|
||||
assert!(new_uri.starts_with("zed:///agent/directory"));
|
||||
assert_eq!(MentionUri::parse(&new_uri).unwrap(), parsed);
|
||||
assert_eq!(parsed.to_uri().to_string(), file_uri);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_directory_uri_with_slash() {
|
||||
let uri = MentionUri::Directory {
|
||||
abs_path: PathBuf::from(path!("/path/to/dir/")),
|
||||
};
|
||||
let expected = uri!("file:///path/to/dir/");
|
||||
assert_eq!(uri.to_uri().to_string(), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -371,15 +333,14 @@ mod tests {
|
||||
let uri = MentionUri::Directory {
|
||||
abs_path: PathBuf::from(path!("/path/to/dir")),
|
||||
};
|
||||
let uri_string = uri.to_uri().to_string();
|
||||
assert!(uri_string.starts_with("zed:///agent/directory"));
|
||||
assert_eq!(MentionUri::parse(&uri_string).unwrap(), uri);
|
||||
let expected = uri!("file:///path/to/dir/");
|
||||
assert_eq!(uri.to_uri().to_string(), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_symbol_uri() {
|
||||
let old_uri = uri!("file:///path/to/file.rs?symbol=MySymbol#L10:20");
|
||||
let parsed = MentionUri::parse(old_uri).unwrap();
|
||||
let symbol_uri = uri!("file:///path/to/file.rs?symbol=MySymbol#L10:20");
|
||||
let parsed = MentionUri::parse(symbol_uri).unwrap();
|
||||
match &parsed {
|
||||
MentionUri::Symbol {
|
||||
abs_path: path,
|
||||
@@ -393,15 +354,13 @@ mod tests {
|
||||
}
|
||||
_ => panic!("Expected Symbol variant"),
|
||||
}
|
||||
let new_uri = parsed.to_uri().to_string();
|
||||
assert!(new_uri.starts_with("zed:///agent/symbol/MySymbol"));
|
||||
assert_eq!(MentionUri::parse(&new_uri).unwrap(), parsed);
|
||||
assert_eq!(parsed.to_uri().to_string(), symbol_uri);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_selection_uri() {
|
||||
let old_uri = uri!("file:///path/to/file.rs#L5:15");
|
||||
let parsed = MentionUri::parse(old_uri).unwrap();
|
||||
let selection_uri = uri!("file:///path/to/file.rs#L5:15");
|
||||
let parsed = MentionUri::parse(selection_uri).unwrap();
|
||||
match &parsed {
|
||||
MentionUri::Selection {
|
||||
abs_path: path,
|
||||
@@ -416,9 +375,7 @@ mod tests {
|
||||
}
|
||||
_ => panic!("Expected Selection variant"),
|
||||
}
|
||||
let new_uri = parsed.to_uri().to_string();
|
||||
assert!(new_uri.starts_with("zed:///agent/selection"));
|
||||
assert_eq!(MentionUri::parse(&new_uri).unwrap(), parsed);
|
||||
assert_eq!(parsed.to_uri().to_string(), selection_uri);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,46 +1,37 @@
|
||||
use agent_client_protocol as acp;
|
||||
|
||||
use futures::{FutureExt as _, future::Shared};
|
||||
use gpui::{App, AppContext, Context, Entity, Task};
|
||||
use gpui::{App, AppContext, Context, Entity};
|
||||
use language::LanguageRegistry;
|
||||
use markdown::Markdown;
|
||||
use std::{path::PathBuf, process::ExitStatus, sync::Arc, time::Instant};
|
||||
|
||||
pub struct Terminal {
|
||||
id: acp::TerminalId,
|
||||
command: Entity<Markdown>,
|
||||
working_dir: Option<PathBuf>,
|
||||
terminal: Entity<terminal::Terminal>,
|
||||
started_at: Instant,
|
||||
output: Option<TerminalOutput>,
|
||||
output_byte_limit: Option<usize>,
|
||||
_output_task: Shared<Task<acp::TerminalExitStatus>>,
|
||||
}
|
||||
|
||||
pub struct TerminalOutput {
|
||||
pub ended_at: Instant,
|
||||
pub exit_status: Option<ExitStatus>,
|
||||
pub content: String,
|
||||
pub was_content_truncated: bool,
|
||||
pub original_content_len: usize,
|
||||
pub content_line_count: usize,
|
||||
pub finished_with_empty_output: bool,
|
||||
}
|
||||
|
||||
impl Terminal {
|
||||
pub fn new(
|
||||
id: acp::TerminalId,
|
||||
command_label: &str,
|
||||
command: String,
|
||||
working_dir: Option<PathBuf>,
|
||||
output_byte_limit: Option<usize>,
|
||||
terminal: Entity<terminal::Terminal>,
|
||||
language_registry: Arc<LanguageRegistry>,
|
||||
cx: &mut Context<Self>,
|
||||
) -> Self {
|
||||
let command_task = terminal.read(cx).wait_for_completed_task(cx);
|
||||
Self {
|
||||
id,
|
||||
command: cx.new(|cx| {
|
||||
Markdown::new(
|
||||
format!("```\n{}\n```", command_label).into(),
|
||||
format!("```\n{}\n```", command).into(),
|
||||
Some(language_registry.clone()),
|
||||
None,
|
||||
cx,
|
||||
@@ -50,97 +41,27 @@ impl Terminal {
|
||||
terminal,
|
||||
started_at: Instant::now(),
|
||||
output: None,
|
||||
output_byte_limit,
|
||||
_output_task: cx
|
||||
.spawn(async move |this, cx| {
|
||||
let exit_status = command_task.await;
|
||||
|
||||
this.update(cx, |this, cx| {
|
||||
let (content, original_content_len) = this.truncated_output(cx);
|
||||
let content_line_count = this.terminal.read(cx).total_lines();
|
||||
|
||||
this.output = Some(TerminalOutput {
|
||||
ended_at: Instant::now(),
|
||||
exit_status,
|
||||
content,
|
||||
original_content_len,
|
||||
content_line_count,
|
||||
});
|
||||
cx.notify();
|
||||
})
|
||||
.ok();
|
||||
|
||||
let exit_status = exit_status.map(portable_pty::ExitStatus::from);
|
||||
|
||||
acp::TerminalExitStatus {
|
||||
exit_code: exit_status.as_ref().map(|e| e.exit_code()),
|
||||
signal: exit_status.and_then(|e| e.signal().map(Into::into)),
|
||||
meta: None,
|
||||
}
|
||||
})
|
||||
.shared(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn id(&self) -> &acp::TerminalId {
|
||||
&self.id
|
||||
}
|
||||
|
||||
pub fn wait_for_exit(&self) -> Shared<Task<acp::TerminalExitStatus>> {
|
||||
self._output_task.clone()
|
||||
}
|
||||
|
||||
pub fn kill(&mut self, cx: &mut App) {
|
||||
self.terminal.update(cx, |terminal, _cx| {
|
||||
terminal.kill_active_task();
|
||||
pub fn finish(
|
||||
&mut self,
|
||||
exit_status: Option<ExitStatus>,
|
||||
original_content_len: usize,
|
||||
truncated_content_len: usize,
|
||||
content_line_count: usize,
|
||||
finished_with_empty_output: bool,
|
||||
cx: &mut Context<Self>,
|
||||
) {
|
||||
self.output = Some(TerminalOutput {
|
||||
ended_at: Instant::now(),
|
||||
exit_status,
|
||||
was_content_truncated: truncated_content_len < original_content_len,
|
||||
original_content_len,
|
||||
content_line_count,
|
||||
finished_with_empty_output,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn current_output(&self, cx: &App) -> acp::TerminalOutputResponse {
|
||||
if let Some(output) = self.output.as_ref() {
|
||||
let exit_status = output.exit_status.map(portable_pty::ExitStatus::from);
|
||||
|
||||
acp::TerminalOutputResponse {
|
||||
output: output.content.clone(),
|
||||
truncated: output.original_content_len > output.content.len(),
|
||||
exit_status: Some(acp::TerminalExitStatus {
|
||||
exit_code: exit_status.as_ref().map(|e| e.exit_code()),
|
||||
signal: exit_status.and_then(|e| e.signal().map(Into::into)),
|
||||
meta: None,
|
||||
}),
|
||||
meta: None,
|
||||
}
|
||||
} else {
|
||||
let (current_content, original_len) = self.truncated_output(cx);
|
||||
|
||||
acp::TerminalOutputResponse {
|
||||
truncated: current_content.len() < original_len,
|
||||
output: current_content,
|
||||
exit_status: None,
|
||||
meta: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn truncated_output(&self, cx: &App) -> (String, usize) {
|
||||
let terminal = self.terminal.read(cx);
|
||||
let mut content = terminal.get_content();
|
||||
|
||||
let original_content_len = content.len();
|
||||
|
||||
if let Some(limit) = self.output_byte_limit
|
||||
&& content.len() > limit
|
||||
{
|
||||
let mut end_ix = limit.min(content.len());
|
||||
while !content.is_char_boundary(end_ix) {
|
||||
end_ix -= 1;
|
||||
}
|
||||
// Don't truncate mid-line, clear the remainder of the last line
|
||||
end_ix = content[..end_ix].rfind('\n').unwrap_or(end_ix);
|
||||
content.truncate(end_ix);
|
||||
}
|
||||
|
||||
(content, original_content_len)
|
||||
cx.notify();
|
||||
}
|
||||
|
||||
pub fn command(&self) -> &Entity<Markdown> {
|
||||
|
||||
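The `truncated_output` logic in this hunk caps terminal output at a byte limit while backing up first to a UTF-8 character boundary and then to the last complete line. A standalone sketch of the same idea, as a hypothetical helper using only `std`:

```rust
/// Hypothetical standalone version of the truncation shown above: cap `content`
/// at `limit` bytes, backing up to a UTF-8 char boundary and then to the last
/// complete line so the output never ends mid-line.
fn truncate_output(mut content: String, limit: usize) -> (String, usize) {
    let original_len = content.len();
    if content.len() > limit {
        let mut end_ix = limit.min(content.len());
        while !content.is_char_boundary(end_ix) {
            end_ix -= 1;
        }
        // Don't truncate mid-line; drop the remainder of the last (partial) line.
        end_ix = content[..end_ix].rfind('\n').unwrap_or(end_ix);
        content.truncate(end_ix);
    }
    (content, original_len)
}

fn main() {
    let (truncated, original_len) =
        truncate_output("line one\nline two\nline three\n".to_string(), 12);
    assert_eq!(truncated, "line one");
    assert_eq!(original_len, 29);
}
```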
@@ -4,26 +4,22 @@ use std::{
|
||||
fmt::Display,
|
||||
rc::{Rc, Weak},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use agent_client_protocol as acp;
|
||||
use collections::HashMap;
|
||||
use gpui::{
|
||||
App, ClipboardItem, Empty, Entity, EventEmitter, FocusHandle, Focusable, Global, ListAlignment,
|
||||
ListState, StyleRefinement, Subscription, Task, TextStyleRefinement, Window, actions, list,
|
||||
prelude::*,
|
||||
App, Empty, Entity, EventEmitter, FocusHandle, Focusable, Global, ListAlignment, ListState,
|
||||
StyleRefinement, Subscription, Task, TextStyleRefinement, Window, actions, list, prelude::*,
|
||||
};
|
||||
use language::LanguageRegistry;
|
||||
use markdown::{CodeBlockRenderer, Markdown, MarkdownElement, MarkdownStyle};
|
||||
use project::Project;
|
||||
use settings::Settings;
|
||||
use theme::ThemeSettings;
|
||||
use ui::{Tooltip, prelude::*};
|
||||
use ui::prelude::*;
|
||||
use util::ResultExt as _;
|
||||
use workspace::{
|
||||
Item, ItemHandle, ToolbarItemEvent, ToolbarItemLocation, ToolbarItemView, Workspace,
|
||||
};
|
||||
use workspace::{Item, Workspace};
|
||||
|
||||
actions!(dev, [OpenAcpLogs]);
|
||||
|
||||
@@ -231,34 +227,6 @@ impl AcpTools {
|
||||
cx.notify();
|
||||
}
|
||||
|
||||
fn serialize_observed_messages(&self) -> Option<String> {
|
||||
let connection = self.watched_connection.as_ref()?;
|
||||
|
||||
let messages: Vec<serde_json::Value> = connection
|
||||
.messages
|
||||
.iter()
|
||||
.filter_map(|message| {
|
||||
let params = match &message.params {
|
||||
Ok(Some(params)) => params.clone(),
|
||||
Ok(None) => serde_json::Value::Null,
|
||||
Err(err) => serde_json::to_value(err).ok()?,
|
||||
};
|
||||
Some(serde_json::json!({
|
||||
"_direction": match message.direction {
|
||||
acp::StreamMessageDirection::Incoming => "incoming",
|
||||
acp::StreamMessageDirection::Outgoing => "outgoing",
|
||||
},
|
||||
"_type": message.message_type.to_string().to_lowercase(),
|
||||
"id": message.request_id,
|
||||
"method": message.name.to_string(),
|
||||
"params": params,
|
||||
}))
|
||||
})
|
||||
.collect();
|
||||
|
||||
serde_json::to_string_pretty(&messages).ok()
|
||||
}
|
||||
|
||||
fn render_message(
|
||||
&mut self,
|
||||
index: usize,
|
||||
@@ -524,92 +492,3 @@ impl Render for AcpTools {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct AcpToolsToolbarItemView {
|
||||
acp_tools: Option<Entity<AcpTools>>,
|
||||
just_copied: bool,
|
||||
}
|
||||
|
||||
impl AcpToolsToolbarItemView {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
acp_tools: None,
|
||||
just_copied: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Render for AcpToolsToolbarItemView {
|
||||
fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
|
||||
let Some(acp_tools) = self.acp_tools.as_ref() else {
|
||||
return Empty.into_any_element();
|
||||
};
|
||||
|
||||
let acp_tools = acp_tools.clone();
|
||||
|
||||
h_flex()
|
||||
.gap_2()
|
||||
.child(
|
||||
IconButton::new(
|
||||
"copy_all_messages",
|
||||
if self.just_copied {
|
||||
IconName::Check
|
||||
} else {
|
||||
IconName::Copy
|
||||
},
|
||||
)
|
||||
.icon_size(IconSize::Small)
|
||||
.tooltip(Tooltip::text(if self.just_copied {
|
||||
"Copied!"
|
||||
} else {
|
||||
"Copy All Messages"
|
||||
}))
|
||||
.disabled(
|
||||
acp_tools
|
||||
.read(cx)
|
||||
.watched_connection
|
||||
.as_ref()
|
||||
.is_none_or(|connection| connection.messages.is_empty()),
|
||||
)
|
||||
.on_click(cx.listener(move |this, _, _window, cx| {
|
||||
if let Some(content) = acp_tools.read(cx).serialize_observed_messages() {
|
||||
cx.write_to_clipboard(ClipboardItem::new_string(content));
|
||||
|
||||
this.just_copied = true;
|
||||
cx.spawn(async move |this, cx| {
|
||||
cx.background_executor().timer(Duration::from_secs(2)).await;
|
||||
this.update(cx, |this, cx| {
|
||||
this.just_copied = false;
|
||||
cx.notify();
|
||||
})
|
||||
})
|
||||
.detach();
|
||||
}
|
||||
})),
|
||||
)
|
||||
.into_any()
|
||||
}
|
||||
}
|
||||
|
||||
impl EventEmitter<ToolbarItemEvent> for AcpToolsToolbarItemView {}
|
||||
|
||||
impl ToolbarItemView for AcpToolsToolbarItemView {
|
||||
fn set_active_pane_item(
|
||||
&mut self,
|
||||
active_pane_item: Option<&dyn ItemHandle>,
|
||||
_window: &mut Window,
|
||||
cx: &mut Context<Self>,
|
||||
) -> ToolbarItemLocation {
|
||||
if let Some(item) = active_pane_item
|
||||
&& let Some(acp_tools) = item.downcast::<AcpTools>()
|
||||
{
|
||||
self.acp_tools = Some(acp_tools);
|
||||
cx.notify();
|
||||
return ToolbarItemLocation::PrimaryRight;
|
||||
}
|
||||
if self.acp_tools.take().is_some() {
|
||||
cx.notify();
|
||||
}
|
||||
ToolbarItemLocation::Hidden
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,10 @@ use language::{Anchor, Buffer, BufferEvent, DiskState, Point, ToPoint};
|
||||
use project::{Project, ProjectItem, lsp_store::OpenLspBufferHandle};
|
||||
use std::{cmp, ops::Range, sync::Arc};
|
||||
use text::{Edit, Patch, Rope};
|
||||
use util::{RangeExt, ResultExt as _};
|
||||
use util::{
|
||||
RangeExt, ResultExt as _,
|
||||
paths::{PathStyle, RemotePathBuf},
|
||||
};
|
||||
|
||||
/// Tracks actions performed by tools in a thread
|
||||
pub struct ActionLog {
|
||||
@@ -59,13 +62,7 @@ impl ActionLog {
|
||||
let file_path = buffer
|
||||
.read(cx)
|
||||
.file()
|
||||
.map(|file| {
|
||||
let mut path = file.full_path(cx).to_string_lossy().into_owned();
|
||||
if file.path_style(cx).is_windows() {
|
||||
path = path.replace('\\', "/");
|
||||
}
|
||||
path
|
||||
})
|
||||
.map(|file| RemotePathBuf::new(file.full_path(cx), PathStyle::Posix).to_proto())
|
||||
.unwrap_or_else(|| format!("buffer_{}", buffer.entity_id()));
|
||||
|
||||
let mut result = String::new();
|
||||
@@ -2221,7 +2218,7 @@ mod tests {
|
||||
action_log.update(cx, |log, cx| log.buffer_read(buffer.clone(), cx));
|
||||
|
||||
for _ in 0..operations {
|
||||
match rng.random_range(0..100) {
|
||||
match rng.gen_range(0..100) {
|
||||
0..25 => {
|
||||
action_log.update(cx, |log, cx| {
|
||||
let range = buffer.read(cx).random_byte_range(0, &mut rng);
|
||||
@@ -2240,7 +2237,7 @@ mod tests {
|
||||
.unwrap();
|
||||
}
|
||||
_ => {
|
||||
let is_agent_edit = rng.random_bool(0.5);
|
||||
let is_agent_edit = rng.gen_bool(0.5);
|
||||
if is_agent_edit {
|
||||
log::info!("agent edit");
|
||||
} else {
|
||||
@@ -2255,7 +2252,7 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
if rng.random_bool(0.2) {
|
||||
if rng.gen_bool(0.2) {
|
||||
quiesce(&action_log, &buffer, cx);
|
||||
}
|
||||
}
|
||||
@@ -2304,7 +2301,7 @@ mod tests {
|
||||
.await;
|
||||
fs.set_head_for_repo(
|
||||
path!("/project/.git").as_ref(),
|
||||
&[("file.txt", "a\nb\nc\nd\ne\nf\ng\nh\ni\nj".into())],
|
||||
&[("file.txt".into(), "a\nb\nc\nd\ne\nf\ng\nh\ni\nj".into())],
|
||||
"0000000",
|
||||
);
|
||||
cx.run_until_parked();
|
||||
@@ -2387,7 +2384,7 @@ mod tests {
|
||||
// - Ignores the last line edit (j stays as j)
|
||||
fs.set_head_for_repo(
|
||||
path!("/project/.git").as_ref(),
|
||||
&[("file.txt", "A\nb\nc\nf\nG\nh\ni\nj".into())],
|
||||
&[("file.txt".into(), "A\nb\nc\nf\nG\nh\ni\nj".into())],
|
||||
"0000001",
|
||||
);
|
||||
cx.run_until_parked();
|
||||
@@ -2418,7 +2415,10 @@ mod tests {
|
||||
// Make another commit that accepts the NEW line but with different content
|
||||
fs.set_head_for_repo(
|
||||
path!("/project/.git").as_ref(),
|
||||
&[("file.txt", "A\nb\nc\nf\nGGG\nh\nDIFFERENT\ni\nj".into())],
|
||||
&[(
|
||||
"file.txt".into(),
|
||||
"A\nb\nc\nf\nGGG\nh\nDIFFERENT\ni\nj".into(),
|
||||
)],
|
||||
"0000002",
|
||||
);
|
||||
cx.run_until_parked();
|
||||
@@ -2444,7 +2444,7 @@ mod tests {
|
||||
// Final commit that accepts all remaining edits
|
||||
fs.set_head_for_repo(
|
||||
path!("/project/.git").as_ref(),
|
||||
&[("file.txt", "A\nb\nc\nf\nGGG\nh\nNEW\ni\nJ".into())],
|
||||
&[("file.txt".into(), "A\nb\nc\nf\nGGG\nh\nNEW\ni\nJ".into())],
|
||||
"0000003",
|
||||
);
|
||||
cx.run_until_parked();
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
use auto_update::{AutoUpdateStatus, AutoUpdater, DismissMessage, VersionCheckType};
|
||||
use auto_update::{AutoUpdateStatus, AutoUpdater, DismissErrorMessage, VersionCheckType};
|
||||
use editor::Editor;
|
||||
use extension_host::{ExtensionOperation, ExtensionStore};
|
||||
use extension_host::ExtensionStore;
|
||||
use futures::StreamExt;
|
||||
use gpui::{
|
||||
App, Context, CursorStyle, Entity, EventEmitter, InteractiveElement as _, ParentElement as _,
|
||||
Render, SharedString, StatefulInteractiveElement, Styled, Window, actions,
|
||||
Animation, AnimationExt as _, App, Context, CursorStyle, Entity, EventEmitter,
|
||||
InteractiveElement as _, ParentElement as _, Render, SharedString, StatefulInteractiveElement,
|
||||
Styled, Transformation, Window, actions, percentage,
|
||||
};
|
||||
use language::{
|
||||
BinaryStatus, LanguageRegistry, LanguageServerId, LanguageServerName,
|
||||
@@ -20,13 +21,11 @@ use std::{
|
||||
cmp::Reverse,
|
||||
collections::HashSet,
|
||||
fmt::Write,
|
||||
path::Path,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use ui::{
|
||||
ButtonLike, CommonAnimationExt, ContextMenu, PopoverMenu, PopoverMenuHandle, Tooltip,
|
||||
prelude::*,
|
||||
};
|
||||
use ui::{ButtonLike, ContextMenu, PopoverMenu, PopoverMenuHandle, Tooltip, prelude::*};
|
||||
use util::truncate_and_trailoff;
|
||||
use workspace::{StatusItemView, Workspace, item::ItemHandle};
|
||||
|
||||
@@ -83,6 +82,7 @@ impl ActivityIndicator {
|
||||
) -> Entity<ActivityIndicator> {
|
||||
let project = workspace.project().clone();
|
||||
let auto_updater = AutoUpdater::get(cx);
|
||||
let workspace_handle = cx.entity();
|
||||
let this = cx.new(|cx| {
|
||||
let mut status_events = languages.language_server_binary_statuses();
|
||||
cx.spawn(async move |this, cx| {
|
||||
@@ -100,6 +100,20 @@ impl ActivityIndicator {
|
||||
})
|
||||
.detach();
|
||||
|
||||
cx.subscribe_in(
|
||||
&workspace_handle,
|
||||
window,
|
||||
|activity_indicator, _, event, window, cx| {
|
||||
if let workspace::Event::ClearActivityIndicator = event
|
||||
&& activity_indicator.statuses.pop().is_some()
|
||||
{
|
||||
activity_indicator.dismiss_error_message(&DismissErrorMessage, window, cx);
|
||||
cx.notify();
|
||||
}
|
||||
},
|
||||
)
|
||||
.detach();
|
||||
|
||||
cx.subscribe(
|
||||
&project.read(cx).lsp_store(),
|
||||
|activity_indicator, _, event, cx| {
|
||||
@@ -211,8 +225,7 @@ impl ActivityIndicator {
|
||||
server_name,
|
||||
status,
|
||||
} => {
|
||||
let create_buffer =
|
||||
project.update(cx, |project, cx| project.create_buffer(false, cx));
|
||||
let create_buffer = project.update(cx, |project, cx| project.create_buffer(cx));
|
||||
let status = status.clone();
|
||||
let server_name = server_name.clone();
|
||||
cx.spawn_in(window, async move |workspace, cx| {
|
||||
@@ -279,13 +292,18 @@ impl ActivityIndicator {
|
||||
});
|
||||
}
|
||||
|
||||
fn dismiss_message(&mut self, _: &DismissMessage, _: &mut Window, cx: &mut Context<Self>) {
|
||||
let dismissed = if let Some(updater) = &self.auto_updater {
|
||||
updater.update(cx, |updater, cx| updater.dismiss(cx))
|
||||
fn dismiss_error_message(
|
||||
&mut self,
|
||||
_: &DismissErrorMessage,
|
||||
_: &mut Window,
|
||||
cx: &mut Context<Self>,
|
||||
) {
|
||||
let error_dismissed = if let Some(updater) = &self.auto_updater {
|
||||
updater.update(cx, |updater, cx| updater.dismiss_error(cx))
|
||||
} else {
|
||||
false
|
||||
};
|
||||
if dismissed {
|
||||
if error_dismissed {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -327,13 +345,17 @@ impl ActivityIndicator {
|
||||
.flatten()
|
||||
}
|
||||
|
||||
fn pending_environment_error<'a>(&'a self, cx: &'a App) -> Option<&'a EnvironmentErrorMessage> {
|
||||
self.project.read(cx).peek_environment_error(cx)
|
||||
fn pending_environment_errors<'a>(
|
||||
&'a self,
|
||||
cx: &'a App,
|
||||
) -> impl Iterator<Item = (&'a Arc<Path>, &'a EnvironmentErrorMessage)> {
|
||||
self.project.read(cx).shell_environment_errors(cx)
|
||||
}
|
||||
|
||||
fn content_to_render(&mut self, cx: &mut Context<Self>) -> Option<Content> {
|
||||
// Show if any direnv calls failed
|
||||
if let Some(error) = self.pending_environment_error(cx) {
|
||||
if let Some((abs_path, error)) = self.pending_environment_errors(cx).next() {
|
||||
let abs_path = abs_path.clone();
|
||||
return Some(Content {
|
||||
icon: Some(
|
||||
Icon::new(IconName::Warning)
|
||||
@@ -343,7 +365,7 @@ impl ActivityIndicator {
|
||||
message: error.0.clone(),
|
||||
on_click: Some(Arc::new(move |this, window, cx| {
|
||||
this.project.update(cx, |project, cx| {
|
||||
project.pop_environment_error(cx);
|
||||
project.remove_environment_error(&abs_path, cx);
|
||||
});
|
||||
window.dispatch_action(Box::new(workspace::OpenLog), cx);
|
||||
})),
|
||||
@@ -383,7 +405,13 @@ impl ActivityIndicator {
|
||||
icon: Some(
|
||||
Icon::new(IconName::ArrowCircle)
|
||||
.size(IconSize::Small)
|
||||
.with_rotate_animation(2)
|
||||
.with_animation(
|
||||
"arrow-circle",
|
||||
Animation::new(Duration::from_secs(2)).repeat(),
|
||||
|icon, delta| {
|
||||
icon.transform(Transformation::rotate(percentage(delta)))
|
||||
},
|
||||
)
|
||||
.into_any_element(),
|
||||
),
|
||||
message,
|
||||
@@ -405,7 +433,11 @@ impl ActivityIndicator {
|
||||
icon: Some(
|
||||
Icon::new(IconName::ArrowCircle)
|
||||
.size(IconSize::Small)
|
||||
.with_rotate_animation(2)
|
||||
.with_animation(
|
||||
"arrow-circle",
|
||||
Animation::new(Duration::from_secs(2)).repeat(),
|
||||
|icon, delta| icon.transform(Transformation::rotate(percentage(delta))),
|
||||
)
|
||||
.into_any_element(),
|
||||
),
|
||||
message: format!("Debug: {}", session.read(cx).adapter()),
|
||||
@@ -428,7 +460,11 @@ impl ActivityIndicator {
|
||||
icon: Some(
|
||||
Icon::new(IconName::ArrowCircle)
|
||||
.size(IconSize::Small)
|
||||
.with_rotate_animation(2)
|
||||
.with_animation(
|
||||
"arrow-circle",
|
||||
Animation::new(Duration::from_secs(2)).repeat(),
|
||||
|icon, delta| icon.transform(Transformation::rotate(percentage(delta))),
|
||||
)
|
||||
.into_any_element(),
|
||||
),
|
||||
message: job_info.message.into(),
|
||||
@@ -503,7 +539,7 @@ impl ActivityIndicator {
|
||||
on_click: Some(Arc::new(move |this, window, cx| {
|
||||
this.statuses
|
||||
.retain(|status| !downloading.contains(&status.name));
|
||||
this.dismiss_message(&DismissMessage, window, cx)
|
||||
this.dismiss_error_message(&DismissErrorMessage, window, cx)
|
||||
})),
|
||||
tooltip_message: None,
|
||||
});
|
||||
@@ -532,7 +568,7 @@ impl ActivityIndicator {
|
||||
on_click: Some(Arc::new(move |this, window, cx| {
|
||||
this.statuses
|
||||
.retain(|status| !checking_for_update.contains(&status.name));
|
||||
this.dismiss_message(&DismissMessage, window, cx)
|
||||
this.dismiss_error_message(&DismissErrorMessage, window, cx)
|
||||
})),
|
||||
tooltip_message: None,
|
||||
});
|
||||
@@ -635,19 +671,17 @@ impl ActivityIndicator {
|
||||
}
|
||||
|
||||
// Show any application auto-update info.
|
||||
self.auto_updater
|
||||
.as_ref()
|
||||
.and_then(|updater| match &updater.read(cx).status() {
|
||||
if let Some(updater) = &self.auto_updater {
|
||||
return match &updater.read(cx).status() {
|
||||
AutoUpdateStatus::Checking => Some(Content {
|
||||
icon: Some(
|
||||
Icon::new(IconName::LoadCircle)
|
||||
Icon::new(IconName::Download)
|
||||
.size(IconSize::Small)
|
||||
.with_rotate_animation(3)
|
||||
.into_any_element(),
|
||||
),
|
||||
message: "Checking for Zed updates…".to_string(),
|
||||
on_click: Some(Arc::new(|this, window, cx| {
|
||||
this.dismiss_message(&DismissMessage, window, cx)
|
||||
this.dismiss_error_message(&DismissErrorMessage, window, cx)
|
||||
})),
|
||||
tooltip_message: None,
|
||||
}),
|
||||
@@ -659,20 +693,19 @@ impl ActivityIndicator {
|
||||
),
|
||||
message: "Downloading Zed update…".to_string(),
|
||||
on_click: Some(Arc::new(|this, window, cx| {
|
||||
this.dismiss_message(&DismissMessage, window, cx)
|
||||
this.dismiss_error_message(&DismissErrorMessage, window, cx)
|
||||
})),
|
||||
tooltip_message: Some(Self::version_tooltip_message(version)),
|
||||
}),
|
||||
AutoUpdateStatus::Installing { version } => Some(Content {
|
||||
icon: Some(
|
||||
Icon::new(IconName::LoadCircle)
|
||||
Icon::new(IconName::Download)
|
||||
.size(IconSize::Small)
|
||||
.with_rotate_animation(3)
|
||||
.into_any_element(),
|
||||
),
|
||||
message: "Installing Zed update…".to_string(),
|
||||
on_click: Some(Arc::new(|this, window, cx| {
|
||||
this.dismiss_message(&DismissMessage, window, cx)
|
||||
this.dismiss_error_message(&DismissErrorMessage, window, cx)
|
||||
})),
|
||||
tooltip_message: Some(Self::version_tooltip_message(version)),
|
||||
}),
|
||||
@@ -682,63 +715,41 @@ impl ActivityIndicator {
|
||||
on_click: Some(Arc::new(move |_, _, cx| workspace::reload(cx))),
|
||||
tooltip_message: Some(Self::version_tooltip_message(version)),
|
||||
}),
|
||||
AutoUpdateStatus::Errored { error } => Some(Content {
|
||||
AutoUpdateStatus::Errored => Some(Content {
|
||||
icon: Some(
|
||||
Icon::new(IconName::Warning)
|
||||
.size(IconSize::Small)
|
||||
.into_any_element(),
|
||||
),
|
||||
message: "Failed to update Zed".to_string(),
|
||||
message: "Auto update failed".to_string(),
|
||||
on_click: Some(Arc::new(|this, window, cx| {
|
||||
window.dispatch_action(Box::new(workspace::OpenLog), cx);
|
||||
this.dismiss_message(&DismissMessage, window, cx);
|
||||
this.dismiss_error_message(&DismissErrorMessage, window, cx)
|
||||
})),
|
||||
tooltip_message: Some(format!("{error}")),
|
||||
tooltip_message: None,
|
||||
}),
|
||||
AutoUpdateStatus::Idle => None,
|
||||
})
|
||||
.or_else(|| {
|
||||
if let Some(extension_store) =
|
||||
ExtensionStore::try_global(cx).map(|extension_store| extension_store.read(cx))
|
||||
&& let Some((extension_id, operation)) =
|
||||
extension_store.outstanding_operations().iter().next()
|
||||
{
|
||||
let (message, icon, rotate) = match operation {
|
||||
ExtensionOperation::Install => (
|
||||
format!("Installing {extension_id} extension…"),
|
||||
IconName::LoadCircle,
|
||||
true,
|
||||
),
|
||||
ExtensionOperation::Upgrade => (
|
||||
format!("Updating {extension_id} extension…"),
|
||||
IconName::Download,
|
||||
false,
|
||||
),
|
||||
ExtensionOperation::Remove => (
|
||||
format!("Removing {extension_id} extension…"),
|
||||
IconName::LoadCircle,
|
||||
true,
|
||||
),
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
Some(Content {
|
||||
icon: Some(Icon::new(icon).size(IconSize::Small).map(|this| {
|
||||
if rotate {
|
||||
this.with_rotate_animation(3).into_any_element()
|
||||
} else {
|
||||
this.into_any_element()
|
||||
}
|
||||
})),
|
||||
message,
|
||||
on_click: Some(Arc::new(|this, window, cx| {
|
||||
this.dismiss_message(&Default::default(), window, cx)
|
||||
})),
|
||||
tooltip_message: None,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
if let Some(extension_store) =
|
||||
ExtensionStore::try_global(cx).map(|extension_store| extension_store.read(cx))
|
||||
&& let Some(extension_id) = extension_store.outstanding_operations().keys().next()
|
||||
{
|
||||
return Some(Content {
|
||||
icon: Some(
|
||||
Icon::new(IconName::Download)
|
||||
.size(IconSize::Small)
|
||||
.into_any_element(),
|
||||
),
|
||||
message: format!("Updating {extension_id} extension…"),
|
||||
on_click: Some(Arc::new(|this, window, cx| {
|
||||
this.dismiss_error_message(&DismissErrorMessage, window, cx)
|
||||
})),
|
||||
tooltip_message: None,
|
||||
});
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn version_tooltip_message(version: &VersionCheckType) -> String {
|
||||
@@ -770,7 +781,7 @@ impl Render for ActivityIndicator {
|
||||
let result = h_flex()
|
||||
.id("activity-indicator")
|
||||
.on_action(cx.listener(Self::show_error_message))
|
||||
.on_action(cx.listener(Self::dismiss_message));
|
||||
.on_action(cx.listener(Self::dismiss_error_message));
|
||||
let Some(content) = self.content_to_render(cx) else {
|
||||
return result;
|
||||
};
|
||||
|
||||
@@ -39,6 +39,7 @@ heed.workspace = true
|
||||
http_client.workspace = true
|
||||
icons.workspace = true
|
||||
indoc.workspace = true
|
||||
itertools.workspace = true
|
||||
language.workspace = true
|
||||
language_model.workspace = true
|
||||
log.workspace = true
|
||||
@@ -62,7 +63,6 @@ time.workspace = true
|
||||
util.workspace = true
|
||||
uuid.workspace = true
|
||||
workspace-hack.workspace = true
|
||||
zed_env_vars.workspace = true
|
||||
zstd.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -2,20 +2,19 @@ pub mod agent_profile;
|
||||
pub mod context;
|
||||
pub mod context_server_tool;
|
||||
pub mod context_store;
|
||||
pub mod history_store;
|
||||
pub mod thread;
|
||||
pub mod thread_store;
|
||||
pub mod tool_use;
|
||||
|
||||
pub use context::{AgentContext, ContextId, ContextLoadResult};
|
||||
pub use context_store::ContextStore;
|
||||
use fs::Fs;
|
||||
use std::sync::Arc;
|
||||
pub use thread::{
|
||||
LastRestoreCheckpoint, Message, MessageCrease, MessageId, MessageSegment, Thread, ThreadError,
|
||||
ThreadEvent, ThreadFeedback, ThreadId, ThreadSummary, TokenUsageRatio,
|
||||
};
|
||||
pub use thread_store::{SerializedThread, TextThreadStore, ThreadStore};
|
||||
|
||||
pub fn init(fs: Arc<dyn Fs>, cx: &mut gpui::App) {
|
||||
thread_store::init(fs, cx);
|
||||
pub fn init(cx: &mut gpui::App) {
|
||||
thread_store::init(cx);
|
||||
}
|
||||
|
||||
@@ -49,10 +49,10 @@ impl AgentProfile {
|
||||
.unwrap_or_default(),
|
||||
};
|
||||
|
||||
update_settings_file(fs, cx, {
|
||||
update_settings_file::<AgentSettings>(fs, cx, {
|
||||
let id = id.clone();
|
||||
move |settings, _cx| {
|
||||
profile_settings.save_to_settings(id, settings).log_err();
|
||||
settings.create_profile(id, profile_settings).log_err();
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ use futures::future;
|
||||
use futures::{FutureExt, future::Shared};
|
||||
use gpui::{App, AppContext as _, ElementId, Entity, SharedString, Task};
|
||||
use icons::IconName;
|
||||
use language::Buffer;
|
||||
use language::{Buffer, ParseStatus};
|
||||
use language_model::{LanguageModelImage, LanguageModelRequestMessage, MessageContent};
|
||||
use project::{Project, ProjectEntryId, ProjectPath, Worktree};
|
||||
use prompt_store::{PromptStore, UserPromptId};
|
||||
@@ -18,7 +18,6 @@ use std::path::PathBuf;
|
||||
use std::{ops::Range, path::Path, sync::Arc};
|
||||
use text::{Anchor, OffsetRangeExt as _};
|
||||
use util::markdown::MarkdownCodeBlock;
|
||||
use util::rel_path::RelPath;
|
||||
use util::{ResultExt as _, post_inc};
|
||||
|
||||
pub const RULES_ICON: IconName = IconName::Reader;
|
||||
@@ -159,7 +158,7 @@ pub struct FileContextHandle {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FileContext {
|
||||
pub handle: FileContextHandle,
|
||||
pub full_path: String,
|
||||
pub full_path: Arc<Path>,
|
||||
pub text: SharedString,
|
||||
pub is_outline: bool,
|
||||
}
|
||||
@@ -187,24 +186,51 @@ impl FileContextHandle {
|
||||
log::error!("file context missing path");
|
||||
return Task::ready(None);
|
||||
};
|
||||
let full_path = file.full_path(cx).to_string_lossy().into_owned();
|
||||
let full_path: Arc<Path> = file.full_path(cx).into();
|
||||
let rope = buffer_ref.as_rope().clone();
|
||||
let buffer = self.buffer.clone();
|
||||
|
||||
cx.spawn(async move |cx| {
|
||||
let buffer_content =
|
||||
outline::get_buffer_content_or_outline(buffer.clone(), Some(&full_path), &cx)
|
||||
.await
|
||||
.unwrap_or_else(|_| outline::BufferContent {
|
||||
text: rope.to_string(),
|
||||
is_outline: false,
|
||||
});
|
||||
// For large files, use outline instead of full content
|
||||
if rope.len() > outline::AUTO_OUTLINE_SIZE {
|
||||
// Wait until the buffer has been fully parsed, so we can read its outline
|
||||
if let Ok(mut parse_status) =
|
||||
buffer.read_with(cx, |buffer, _| buffer.parse_status())
|
||||
{
|
||||
while *parse_status.borrow() != ParseStatus::Idle {
|
||||
parse_status.changed().await.log_err();
|
||||
}
|
||||
|
||||
if let Ok(snapshot) = buffer.read_with(cx, |buffer, _| buffer.snapshot())
|
||||
&& let Some(outline) = snapshot.outline(None)
|
||||
{
|
||||
let items = outline
|
||||
.items
|
||||
.into_iter()
|
||||
.map(|item| item.to_point(&snapshot));
|
||||
|
||||
if let Ok(outline_text) =
|
||||
outline::render_outline(items, None, 0, usize::MAX).await
|
||||
{
|
||||
let context = AgentContext::File(FileContext {
|
||||
handle: self,
|
||||
full_path,
|
||||
text: outline_text.into(),
|
||||
is_outline: true,
|
||||
});
|
||||
return Some((context, vec![buffer]));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to full content if we couldn't build an outline
|
||||
// (or didn't need to because the file was small enough)
|
||||
let context = AgentContext::File(FileContext {
|
||||
handle: self,
|
||||
full_path,
|
||||
text: buffer_content.text.into(),
|
||||
is_outline: buffer_content.is_outline,
|
||||
text: rope.to_string().into(),
|
||||
is_outline: false,
|
||||
});
|
||||
Some((context, vec![buffer]))
|
||||
})
|
||||
@@ -236,14 +262,14 @@ pub struct DirectoryContextHandle {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DirectoryContext {
|
||||
pub handle: DirectoryContextHandle,
|
||||
pub full_path: String,
|
||||
pub full_path: Arc<Path>,
|
||||
pub descendants: Vec<DirectoryContextDescendant>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DirectoryContextDescendant {
|
||||
/// Path within the directory.
|
||||
pub rel_path: Arc<RelPath>,
|
||||
pub rel_path: Arc<Path>,
|
||||
pub fenced_codeblock: SharedString,
|
||||
}
|
||||
|
||||
@@ -274,16 +300,13 @@ impl DirectoryContextHandle {
|
||||
}
|
||||
|
||||
let directory_path = entry.path.clone();
|
||||
let directory_full_path = worktree_ref
|
||||
.full_path(&directory_path)
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
let directory_full_path = worktree_ref.full_path(&directory_path).into();
|
||||
|
||||
let file_paths = collect_files_in_path(worktree_ref, &directory_path);
|
||||
let descendants_future = future::join_all(file_paths.into_iter().map(|path| {
|
||||
let worktree_ref = worktree.read(cx);
|
||||
let worktree_id = worktree_ref.id();
|
||||
let full_path = worktree_ref.full_path(&path).to_string_lossy().into_owned();
|
||||
let full_path = worktree_ref.full_path(&path);
|
||||
|
||||
let rel_path = path
|
||||
.strip_prefix(&directory_path)
|
||||
@@ -364,7 +387,7 @@ pub struct SymbolContextHandle {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SymbolContext {
|
||||
pub handle: SymbolContextHandle,
|
||||
pub full_path: String,
|
||||
pub full_path: Arc<Path>,
|
||||
pub line_range: Range<Point>,
|
||||
pub text: SharedString,
|
||||
}
|
||||
@@ -403,7 +426,7 @@ impl SymbolContextHandle {
|
||||
log::error!("symbol context's file has no path");
|
||||
return Task::ready(None);
|
||||
};
|
||||
let full_path = file.full_path(cx).to_string_lossy().into_owned();
|
||||
let full_path = file.full_path(cx).into();
|
||||
let line_range = self.enclosing_range.to_point(&buffer_ref.snapshot());
|
||||
let text = self.text(cx);
|
||||
let buffer = self.buffer.clone();
|
||||
@@ -437,7 +460,7 @@ pub struct SelectionContextHandle {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SelectionContext {
|
||||
pub handle: SelectionContextHandle,
|
||||
pub full_path: String,
|
||||
pub full_path: Arc<Path>,
|
||||
pub line_range: Range<Point>,
|
||||
pub text: SharedString,
|
||||
}
|
||||
@@ -476,7 +499,7 @@ impl SelectionContextHandle {
|
||||
let text = self.text(cx);
|
||||
let buffer = self.buffer.clone();
|
||||
let context = AgentContext::Selection(SelectionContext {
|
||||
full_path: full_path.to_string_lossy().into_owned(),
|
||||
full_path: full_path.into(),
|
||||
line_range: self.line_range(cx),
|
||||
text,
|
||||
handle: self,
|
||||
@@ -706,7 +729,7 @@ impl Display for RulesContext {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ImageContext {
|
||||
pub project_path: Option<ProjectPath>,
|
||||
pub full_path: Option<String>,
|
||||
pub full_path: Option<Arc<Path>>,
|
||||
pub original_image: Arc<gpui::Image>,
|
||||
// TODO: handle this elsewhere and remove `ignore-interior-mutability` opt-out in clippy.toml
|
||||
// needed due to a false positive of `clippy::mutable_key_type`.
|
||||
@@ -972,7 +995,7 @@ pub fn load_context(
|
||||
})
|
||||
}
|
||||
|
||||
fn collect_files_in_path(worktree: &Worktree, path: &RelPath) -> Vec<Arc<RelPath>> {
|
||||
fn collect_files_in_path(worktree: &Worktree, path: &Path) -> Vec<Arc<Path>> {
|
||||
let mut files = Vec::new();
|
||||
|
||||
for entry in worktree.child_entries(path) {
|
||||
@@ -986,17 +1009,14 @@ fn collect_files_in_path(worktree: &Worktree, path: &RelPath) -> Vec<Arc<RelPath
|
||||
files
|
||||
}
|
||||
|
||||
fn codeblock_tag(full_path: &str, line_range: Option<Range<Point>>) -> String {
|
||||
fn codeblock_tag(full_path: &Path, line_range: Option<Range<Point>>) -> String {
|
||||
let mut result = String::new();
|
||||
|
||||
if let Some(extension) = Path::new(full_path)
|
||||
.extension()
|
||||
.and_then(|ext| ext.to_str())
|
||||
{
|
||||
if let Some(extension) = full_path.extension().and_then(|ext| ext.to_str()) {
|
||||
let _ = write!(result, "{} ", extension);
|
||||
}
|
||||
|
||||
let _ = write!(result, "{}", full_path);
|
||||
let _ = write!(result, "{}", full_path.display());
|
||||
|
||||
if let Some(range) = line_range {
|
||||
if range.start.row == range.end.row {
|
||||
|
||||
@@ -14,10 +14,7 @@ use futures::{self, FutureExt};
use gpui::{App, Context, Entity, EventEmitter, Image, SharedString, Task, WeakEntity};
use language::{Buffer, File as _};
use language_model::LanguageModelImage;
use project::{
Project, ProjectItem, ProjectPath, Symbol, image_store::is_image_file,
lsp_store::SymbolLocation,
};
use project::{Project, ProjectItem, ProjectPath, Symbol, image_store::is_image_file};
use prompt_store::UserPromptId;
use ref_cast::RefCast as _;
use std::{
@@ -312,7 +309,7 @@ impl ContextStore {
let item = image_item.read(cx);
this.insert_image(
Some(item.project_path(cx)),
Some(item.file.full_path(cx).to_string_lossy().into_owned()),
Some(item.file.full_path(cx).into()),
item.image.clone(),
remove_if_exists,
cx,
@@ -328,7 +325,7 @@ impl ContextStore {
fn insert_image(
&mut self,
project_path: Option<ProjectPath>,
full_path: Option<String>,
full_path: Option<Arc<Path>>,
image: Arc<Image>,
remove_if_exists: bool,
cx: &mut Context<ContextStore>,
@@ -503,7 +500,7 @@ impl ContextStore {
let Some(context_path) = buffer.project_path(cx) else {
return false;
};
if symbol.path != SymbolLocation::InProject(context_path) {
if context_path != symbol.path {
return false;
}
let context_range = context.range.to_point_utf16(&buffer.snapshot());

286
crates/agent/src/history_store.rs
Normal file
@@ -0,0 +1,286 @@
|
||||
use crate::{
|
||||
ThreadId,
|
||||
thread_store::{SerializedThreadMetadata, ThreadStore},
|
||||
};
|
||||
use anyhow::{Context as _, Result};
|
||||
use assistant_context::SavedContextMetadata;
|
||||
use chrono::{DateTime, Utc};
|
||||
use gpui::{App, AsyncApp, Entity, SharedString, Task, prelude::*};
|
||||
use itertools::Itertools;
|
||||
use paths::contexts_dir;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{collections::VecDeque, path::Path, sync::Arc, time::Duration};
|
||||
use util::ResultExt as _;
|
||||
|
||||
const MAX_RECENTLY_OPENED_ENTRIES: usize = 6;
|
||||
const NAVIGATION_HISTORY_PATH: &str = "agent-navigation-history.json";
|
||||
const SAVE_RECENTLY_OPENED_ENTRIES_DEBOUNCE: Duration = Duration::from_millis(50);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum HistoryEntry {
|
||||
Thread(SerializedThreadMetadata),
|
||||
Context(SavedContextMetadata),
|
||||
}
|
||||
|
||||
impl HistoryEntry {
|
||||
pub fn updated_at(&self) -> DateTime<Utc> {
|
||||
match self {
|
||||
HistoryEntry::Thread(thread) => thread.updated_at,
|
||||
HistoryEntry::Context(context) => context.mtime.to_utc(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn id(&self) -> HistoryEntryId {
|
||||
match self {
|
||||
HistoryEntry::Thread(thread) => HistoryEntryId::Thread(thread.id.clone()),
|
||||
HistoryEntry::Context(context) => HistoryEntryId::Context(context.path.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn title(&self) -> &SharedString {
|
||||
match self {
|
||||
HistoryEntry::Thread(thread) => &thread.summary,
|
||||
HistoryEntry::Context(context) => &context.title,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Generic identifier for a history entry.
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub enum HistoryEntryId {
|
||||
Thread(ThreadId),
|
||||
Context(Arc<Path>),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
enum SerializedRecentOpen {
|
||||
Thread(String),
|
||||
ContextName(String),
|
||||
/// Old format which stores the full path
|
||||
Context(String),
|
||||
}
|
||||
|
||||
pub struct HistoryStore {
|
||||
thread_store: Entity<ThreadStore>,
|
||||
context_store: Entity<assistant_context::ContextStore>,
|
||||
recently_opened_entries: VecDeque<HistoryEntryId>,
|
||||
_subscriptions: Vec<gpui::Subscription>,
|
||||
_save_recently_opened_entries_task: Task<()>,
|
||||
}
|
||||
|
||||
impl HistoryStore {
|
||||
pub fn new(
|
||||
thread_store: Entity<ThreadStore>,
|
||||
context_store: Entity<assistant_context::ContextStore>,
|
||||
initial_recent_entries: impl IntoIterator<Item = HistoryEntryId>,
|
||||
cx: &mut Context<Self>,
|
||||
) -> Self {
|
||||
let subscriptions = vec![
|
||||
cx.observe(&thread_store, |_, _, cx| cx.notify()),
|
||||
cx.observe(&context_store, |_, _, cx| cx.notify()),
|
||||
];
|
||||
|
||||
cx.spawn(async move |this, cx| {
|
||||
let entries = Self::load_recently_opened_entries(cx).await.log_err()?;
|
||||
this.update(cx, |this, _| {
|
||||
this.recently_opened_entries
|
||||
.extend(
|
||||
entries.into_iter().take(
|
||||
MAX_RECENTLY_OPENED_ENTRIES
|
||||
.saturating_sub(this.recently_opened_entries.len()),
|
||||
),
|
||||
);
|
||||
})
|
||||
.ok()
|
||||
})
|
||||
.detach();
|
||||
|
||||
Self {
|
||||
thread_store,
|
||||
context_store,
|
||||
recently_opened_entries: initial_recent_entries.into_iter().collect(),
|
||||
_subscriptions: subscriptions,
|
||||
_save_recently_opened_entries_task: Task::ready(()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn entries(&self, cx: &mut Context<Self>) -> Vec<HistoryEntry> {
|
||||
let mut history_entries = Vec::new();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
if std::env::var("ZED_SIMULATE_NO_THREAD_HISTORY").is_ok() {
|
||||
return history_entries;
|
||||
}
|
||||
|
||||
history_entries.extend(
|
||||
self.thread_store
|
||||
.read(cx)
|
||||
.reverse_chronological_threads()
|
||||
.cloned()
|
||||
.map(HistoryEntry::Thread),
|
||||
);
|
||||
history_entries.extend(
|
||||
self.context_store
|
||||
.read(cx)
|
||||
.unordered_contexts()
|
||||
.cloned()
|
||||
.map(HistoryEntry::Context),
|
||||
);
|
||||
|
||||
history_entries.sort_unstable_by_key(|entry| std::cmp::Reverse(entry.updated_at()));
|
||||
history_entries
|
||||
}
|
||||
|
||||
pub fn recent_entries(&self, limit: usize, cx: &mut Context<Self>) -> Vec<HistoryEntry> {
|
||||
self.entries(cx).into_iter().take(limit).collect()
|
||||
}
|
||||
|
||||
pub fn recently_opened_entries(&self, cx: &App) -> Vec<HistoryEntry> {
|
||||
#[cfg(debug_assertions)]
|
||||
if std::env::var("ZED_SIMULATE_NO_THREAD_HISTORY").is_ok() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let thread_entries = self
|
||||
.thread_store
|
||||
.read(cx)
|
||||
.reverse_chronological_threads()
|
||||
.flat_map(|thread| {
|
||||
self.recently_opened_entries
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(index, entry)| match entry {
|
||||
HistoryEntryId::Thread(id) if &thread.id == id => {
|
||||
Some((index, HistoryEntry::Thread(thread.clone())))
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
});
|
||||
|
||||
let context_entries =
|
||||
self.context_store
|
||||
.read(cx)
|
||||
.unordered_contexts()
|
||||
.flat_map(|context| {
|
||||
self.recently_opened_entries
|
||||
.iter()
|
||||
.enumerate()
|
||||
.flat_map(|(index, entry)| match entry {
|
||||
HistoryEntryId::Context(path) if &context.path == path => {
|
||||
Some((index, HistoryEntry::Context(context.clone())))
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
});
|
||||
|
||||
thread_entries
|
||||
.chain(context_entries)
|
||||
// optimization to halt iteration early
|
||||
.take(self.recently_opened_entries.len())
|
||||
.sorted_unstable_by_key(|(index, _)| *index)
|
||||
.map(|(_, entry)| entry)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn save_recently_opened_entries(&mut self, cx: &mut Context<Self>) {
|
||||
let serialized_entries = self
|
||||
.recently_opened_entries
|
||||
.iter()
|
||||
.filter_map(|entry| match entry {
|
||||
HistoryEntryId::Context(path) => path.file_name().map(|file| {
|
||||
SerializedRecentOpen::ContextName(file.to_string_lossy().to_string())
|
||||
}),
|
||||
HistoryEntryId::Thread(id) => Some(SerializedRecentOpen::Thread(id.to_string())),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self._save_recently_opened_entries_task = cx.spawn(async move |_, cx| {
|
||||
cx.background_executor()
|
||||
.timer(SAVE_RECENTLY_OPENED_ENTRIES_DEBOUNCE)
|
||||
.await;
|
||||
cx.background_spawn(async move {
|
||||
let path = paths::data_dir().join(NAVIGATION_HISTORY_PATH);
|
||||
let content = serde_json::to_string(&serialized_entries)?;
|
||||
std::fs::write(path, content)?;
|
||||
anyhow::Ok(())
|
||||
})
|
||||
.await
|
||||
.log_err();
|
||||
});
|
||||
}
|
||||
|
||||
fn load_recently_opened_entries(cx: &AsyncApp) -> Task<Result<Vec<HistoryEntryId>>> {
|
||||
cx.background_spawn(async move {
|
||||
let path = paths::data_dir().join(NAVIGATION_HISTORY_PATH);
|
||||
let contents = match smol::fs::read_to_string(path).await {
|
||||
Ok(it) => it,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e)
|
||||
.context("deserializing persisted agent panel navigation history");
|
||||
}
|
||||
};
|
||||
let entries = serde_json::from_str::<Vec<SerializedRecentOpen>>(&contents)
|
||||
.context("deserializing persisted agent panel navigation history")?
|
||||
.into_iter()
|
||||
.take(MAX_RECENTLY_OPENED_ENTRIES)
|
||||
.flat_map(|entry| match entry {
|
||||
SerializedRecentOpen::Thread(id) => {
|
||||
Some(HistoryEntryId::Thread(id.as_str().into()))
|
||||
}
|
||||
SerializedRecentOpen::ContextName(file_name) => Some(HistoryEntryId::Context(
|
||||
contexts_dir().join(file_name).into(),
|
||||
)),
|
||||
SerializedRecentOpen::Context(path) => {
|
||||
Path::new(&path).file_name().map(|file_name| {
|
||||
HistoryEntryId::Context(contexts_dir().join(file_name).into())
|
||||
})
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
Ok(entries)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn push_recently_opened_entry(&mut self, entry: HistoryEntryId, cx: &mut Context<Self>) {
|
||||
self.recently_opened_entries
|
||||
.retain(|old_entry| old_entry != &entry);
|
||||
self.recently_opened_entries.push_front(entry);
|
||||
self.recently_opened_entries
|
||||
.truncate(MAX_RECENTLY_OPENED_ENTRIES);
|
||||
self.save_recently_opened_entries(cx);
|
||||
}
|
||||
|
||||
pub fn remove_recently_opened_thread(&mut self, id: ThreadId, cx: &mut Context<Self>) {
|
||||
self.recently_opened_entries.retain(
|
||||
|entry| !matches!(entry, HistoryEntryId::Thread(thread_id) if thread_id == &id),
|
||||
);
|
||||
self.save_recently_opened_entries(cx);
|
||||
}
|
||||
|
||||
pub fn replace_recently_opened_text_thread(
|
||||
&mut self,
|
||||
old_path: &Path,
|
||||
new_path: &Arc<Path>,
|
||||
cx: &mut Context<Self>,
|
||||
) {
|
||||
for entry in &mut self.recently_opened_entries {
|
||||
match entry {
|
||||
HistoryEntryId::Context(path) if path.as_ref() == old_path => {
|
||||
*entry = HistoryEntryId::Context(new_path.clone());
|
||||
break;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
self.save_recently_opened_entries(cx);
|
||||
}
|
||||
|
||||
pub fn remove_recently_opened_entry(&mut self, entry: &HistoryEntryId, cx: &mut Context<Self>) {
|
||||
self.recently_opened_entries
|
||||
.retain(|old_entry| old_entry != entry);
|
||||
self.save_recently_opened_entries(cx);
|
||||
}
|
||||
}
|
||||
@@ -234,6 +234,7 @@ impl MessageSegment {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct ProjectSnapshot {
|
||||
pub worktree_snapshots: Vec<WorktreeSnapshot>,
|
||||
pub unsaved_buffer_paths: Vec<String>,
|
||||
pub timestamp: DateTime<Utc>,
|
||||
}
|
||||
|
||||
@@ -1276,6 +1277,62 @@ impl Thread {
|
||||
);
|
||||
}
|
||||
|
||||
pub fn retry_last_completion(
|
||||
&mut self,
|
||||
window: Option<AnyWindowHandle>,
|
||||
cx: &mut Context<Self>,
|
||||
) {
|
||||
// Clear any existing error state
|
||||
self.retry_state = None;
|
||||
|
||||
// Use the last error context if available, otherwise fall back to configured model
|
||||
let (model, intent) = if let Some((model, intent)) = self.last_error_context.take() {
|
||||
(model, intent)
|
||||
} else if let Some(configured_model) = self.configured_model.as_ref() {
|
||||
let model = configured_model.model.clone();
|
||||
let intent = if self.has_pending_tool_uses() {
|
||||
CompletionIntent::ToolResults
|
||||
} else {
|
||||
CompletionIntent::UserPrompt
|
||||
};
|
||||
(model, intent)
|
||||
} else if let Some(configured_model) = self.get_or_init_configured_model(cx) {
|
||||
let model = configured_model.model.clone();
|
||||
let intent = if self.has_pending_tool_uses() {
|
||||
CompletionIntent::ToolResults
|
||||
} else {
|
||||
CompletionIntent::UserPrompt
|
||||
};
|
||||
(model, intent)
|
||||
} else {
|
||||
return;
|
||||
};
|
||||
|
||||
self.send_to_model(model, intent, window, cx);
|
||||
}
|
||||
|
||||
pub fn enable_burn_mode_and_retry(
|
||||
&mut self,
|
||||
window: Option<AnyWindowHandle>,
|
||||
cx: &mut Context<Self>,
|
||||
) {
|
||||
self.completion_mode = CompletionMode::Burn;
|
||||
cx.emit(ThreadEvent::ProfileChanged);
|
||||
self.retry_last_completion(window, cx);
|
||||
}
|
||||
|
||||
pub fn used_tools_since_last_user_message(&self) -> bool {
|
||||
for message in self.messages.iter().rev() {
|
||||
if self.tool_use.message_has_tool_results(message.id) {
|
||||
return true;
|
||||
} else if message.role == Role::User {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
pub fn to_completion_request(
|
||||
&self,
|
||||
model: Arc<dyn LanguageModel>,
|
||||
@@ -2800,11 +2857,27 @@ impl Thread {
|
||||
.map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
|
||||
.collect();
|
||||
|
||||
cx.spawn(async move |_, _| {
|
||||
cx.spawn(async move |_, cx| {
|
||||
let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
|
||||
|
||||
let mut unsaved_buffers = Vec::new();
|
||||
cx.update(|app_cx| {
|
||||
let buffer_store = project.read(app_cx).buffer_store();
|
||||
for buffer_handle in buffer_store.read(app_cx).buffers() {
|
||||
let buffer = buffer_handle.read(app_cx);
|
||||
if buffer.is_dirty()
|
||||
&& let Some(file) = buffer.file()
|
||||
{
|
||||
let path = file.path().to_string_lossy().to_string();
|
||||
unsaved_buffers.push(path);
|
||||
}
|
||||
}
|
||||
})
|
||||
.ok();
|
||||
|
||||
Arc::new(ProjectSnapshot {
|
||||
worktree_snapshots,
|
||||
unsaved_buffer_paths: unsaved_buffers,
|
||||
timestamp: Utc::now(),
|
||||
})
|
||||
})
|
||||
@@ -2819,7 +2892,7 @@ impl Thread {
|
||||
// Get worktree path and snapshot
|
||||
let worktree_info = cx.update(|app_cx| {
|
||||
let worktree = worktree.read(app_cx);
|
||||
let path = worktree.abs_path().to_string_lossy().into_owned();
|
||||
let path = worktree.abs_path().to_string_lossy().to_string();
|
||||
let snapshot = worktree.snapshot();
|
||||
(path, snapshot)
|
||||
});
|
||||
@@ -3199,10 +3272,9 @@ mod tests {
|
||||
|
||||
// Test-specific constants
|
||||
const TEST_RATE_LIMIT_RETRY_SECS: u64 = 30;
|
||||
use agent_settings::{AgentProfileId, AgentSettings};
|
||||
use agent_settings::{AgentProfileId, AgentSettings, LanguageModelParameters};
|
||||
use assistant_tool::ToolRegistry;
|
||||
use assistant_tools;
|
||||
use fs::Fs;
|
||||
use futures::StreamExt;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::stream::BoxStream;
|
||||
@@ -3217,18 +3289,18 @@ mod tests {
|
||||
use project::{FakeFs, Project};
|
||||
use prompt_store::PromptBuilder;
|
||||
use serde_json::json;
|
||||
use settings::{LanguageModelParameters, Settings, SettingsStore};
|
||||
use settings::{Settings, SettingsStore};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use theme::ThemeSettings;
|
||||
use util::path;
|
||||
use workspace::Workspace;
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_message_with_context(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(
|
||||
&fs,
|
||||
cx,
|
||||
json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
|
||||
)
|
||||
@@ -3303,10 +3375,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_only_include_new_contexts(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(
|
||||
&fs,
|
||||
cx,
|
||||
json!({
|
||||
"file1.rs": "fn function1() {}\n",
|
||||
@@ -3460,10 +3531,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_message_without_files(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(
|
||||
&fs,
|
||||
cx,
|
||||
json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
|
||||
)
|
||||
@@ -3540,10 +3610,9 @@ fn main() {{
|
||||
#[gpui::test]
|
||||
#[ignore] // turn this test on when project_notifications tool is re-enabled
|
||||
async fn test_stale_buffer_notification(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(
|
||||
&fs,
|
||||
cx,
|
||||
json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
|
||||
)
|
||||
@@ -3669,10 +3738,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_storing_profile_setting_per_thread(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(
|
||||
&fs,
|
||||
cx,
|
||||
json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
|
||||
)
|
||||
@@ -3692,10 +3760,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_serializing_thread_profile(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(
|
||||
&fs,
|
||||
cx,
|
||||
json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
|
||||
)
|
||||
@@ -3736,10 +3803,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_temperature_setting(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(
|
||||
&fs,
|
||||
cx,
|
||||
json!({"code.rs": "fn main() {\n println!(\"Hello, world!\");\n}"}),
|
||||
)
|
||||
@@ -3831,9 +3897,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_thread_summary(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
|
||||
let (_, _thread_store, thread, _context_store, model) =
|
||||
setup_test_environment(cx, project.clone()).await;
|
||||
@@ -3916,9 +3982,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_thread_summary_error_set_manually(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
|
||||
let (_, _thread_store, thread, _context_store, model) =
|
||||
setup_test_environment(cx, project.clone()).await;
|
||||
@@ -3938,9 +4004,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_thread_summary_error_retry(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
|
||||
let (_, _thread_store, thread, _context_store, model) =
|
||||
setup_test_environment(cx, project.clone()).await;
|
||||
@@ -4092,9 +4158,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_retry_on_overloaded_error(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Enable Burn Mode to allow retries
|
||||
@@ -4170,9 +4236,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_retry_on_internal_server_error(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Enable Burn Mode to allow retries
|
||||
@@ -4252,9 +4318,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_exponential_backoff_on_retries(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Enable Burn Mode to allow retries
|
||||
@@ -4372,9 +4438,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_max_retries_exceeded(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Enable Burn Mode to allow retries
|
||||
@@ -4463,9 +4529,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_retry_message_removed_on_retry(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Enable Burn Mode to allow retries
|
||||
@@ -4636,9 +4702,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_successful_completion_clears_retry_state(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Enable Burn Mode to allow retries
|
||||
@@ -4802,9 +4868,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_rate_limit_retry_single_attempt(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Enable Burn Mode to allow retries
|
||||
@@ -4987,9 +5053,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_ui_only_messages_not_sent_to_model(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Insert a regular user message
|
||||
@@ -5087,9 +5153,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_no_retry_without_burn_mode(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Ensure we're in Normal mode (not Burn mode)
|
||||
@@ -5160,9 +5226,9 @@ fn main() {{
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_retry_canceled_on_stop(cx: &mut TestAppContext) {
|
||||
let fs = init_test_settings(cx);
|
||||
init_test_settings(cx);
|
||||
|
||||
let project = create_test_project(&fs, cx, json!({})).await;
|
||||
let project = create_test_project(cx, json!({})).await;
|
||||
let (_, _, thread, _, _base_model) = setup_test_environment(cx, project.clone()).await;
|
||||
|
||||
// Enable Burn Mode to allow retries
|
||||
@@ -5268,8 +5334,7 @@ fn main() {{
|
||||
cx.run_until_parked();
|
||||
}
|
||||
|
||||
fn init_test_settings(cx: &mut TestAppContext) -> Arc<dyn Fs> {
|
||||
let fs = FakeFs::new(cx.executor());
|
||||
fn init_test_settings(cx: &mut TestAppContext) {
|
||||
cx.update(|cx| {
|
||||
let settings_store = SettingsStore::test(cx);
|
||||
cx.set_global(settings_store);
|
||||
@@ -5277,10 +5342,10 @@ fn main() {{
|
||||
Project::init_settings(cx);
|
||||
AgentSettings::register(cx);
|
||||
prompt_store::init(cx);
|
||||
thread_store::init(fs.clone(), cx);
|
||||
thread_store::init(cx);
|
||||
workspace::init_settings(cx);
|
||||
language_model::init_settings(cx);
|
||||
theme::init(theme::LoadThemes::JustBase, cx);
|
||||
ThemeSettings::register(cx);
|
||||
ToolRegistry::default_global(cx);
|
||||
assistant_tool::init(cx);
|
||||
|
||||
@@ -5291,17 +5356,16 @@ fn main() {{
|
||||
));
|
||||
assistant_tools::init(http_client, cx);
|
||||
});
|
||||
fs
|
||||
}
|
||||
|
||||
// Helper to create a test project with test files
|
||||
async fn create_test_project(
|
||||
fs: &Arc<dyn Fs>,
|
||||
cx: &mut TestAppContext,
|
||||
files: serde_json::Value,
|
||||
) -> Entity<Project> {
|
||||
fs.as_fake().insert_tree(path!("/test"), files).await;
|
||||
Project::test(fs.clone(), [path!("/test").as_ref()], cx).await
|
||||
let fs = FakeFs::new(cx.executor());
|
||||
fs.insert_tree(path!("/test"), files).await;
|
||||
Project::test(fs, [path!("/test").as_ref()], cx).await
|
||||
}
|
||||
|
||||
async fn setup_test_environment(
|
||||
|
||||
@@ -10,7 +10,6 @@ use assistant_tool::{Tool, ToolId, ToolWorkingSet};
|
||||
use chrono::{DateTime, Utc};
|
||||
use collections::HashMap;
|
||||
use context_server::ContextServerId;
|
||||
use fs::{Fs, RemoveOptions};
|
||||
use futures::{
|
||||
FutureExt as _, StreamExt as _,
|
||||
channel::{mpsc, oneshot},
|
||||
@@ -38,11 +37,12 @@ use std::{
|
||||
cell::{Ref, RefCell},
|
||||
path::{Path, PathBuf},
|
||||
rc::Rc,
|
||||
sync::{Arc, LazyLock, Mutex},
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
use util::{ResultExt as _, rel_path::RelPath};
|
||||
use util::ResultExt as _;
|
||||
|
||||
use zed_env_vars::ZED_STATELESS;
|
||||
pub static ZED_STATELESS: std::sync::LazyLock<bool> =
|
||||
std::sync::LazyLock::new(|| std::env::var("ZED_STATELESS").is_ok_and(|v| !v.is_empty()));
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum DataType {
|
||||
@@ -74,22 +74,20 @@ impl Column for DataType {
}
}

static RULES_FILE_NAMES: LazyLock<[&RelPath; 9]> = LazyLock::new(|| {
[
RelPath::unix(".rules").unwrap(),
RelPath::unix(".cursorrules").unwrap(),
RelPath::unix(".windsurfrules").unwrap(),
RelPath::unix(".clinerules").unwrap(),
RelPath::unix(".github/copilot-instructions.md").unwrap(),
RelPath::unix("CLAUDE.md").unwrap(),
RelPath::unix("AGENT.md").unwrap(),
RelPath::unix("AGENTS.md").unwrap(),
RelPath::unix("GEMINI.md").unwrap(),
]
});
const RULES_FILE_NAMES: [&str; 9] = [
".rules",
".cursorrules",
".windsurfrules",
".clinerules",
".github/copilot-instructions.md",
"CLAUDE.md",
"AGENT.md",
"AGENTS.md",
"GEMINI.md",
];

pub fn init(fs: Arc<dyn Fs>, cx: &mut App) {
ThreadsDatabase::init(fs, cx);
pub fn init(cx: &mut App) {
ThreadsDatabase::init(cx);
}

/// A system prompt shared by all threads created by this ThreadStore
|
||||
@@ -234,10 +232,11 @@ impl ThreadStore {
|
||||
self.enqueue_system_prompt_reload();
|
||||
}
|
||||
project::Event::WorktreeUpdatedEntries(_, items) => {
|
||||
if items
|
||||
.iter()
|
||||
.any(|(path, _, _)| RULES_FILE_NAMES.iter().any(|name| path.as_ref() == *name))
|
||||
{
|
||||
if items.iter().any(|(path, _, _)| {
|
||||
RULES_FILE_NAMES
|
||||
.iter()
|
||||
.any(|name| path.as_ref() == Path::new(name))
|
||||
}) {
|
||||
self.enqueue_system_prompt_reload();
|
||||
}
|
||||
}
|
||||
@@ -329,7 +328,7 @@ impl ThreadStore {
|
||||
cx: &mut App,
|
||||
) -> Task<(WorktreeContext, Option<RulesLoadingError>)> {
|
||||
let tree = worktree.read(cx);
|
||||
let root_name = tree.root_name_str().into();
|
||||
let root_name = tree.root_name().into();
|
||||
let abs_path = tree.abs_path();
|
||||
|
||||
let mut context = WorktreeContext {
|
||||
@@ -871,13 +870,13 @@ impl ThreadsDatabase {
|
||||
GlobalThreadsDatabase::global(cx).0.clone()
|
||||
}
|
||||
|
||||
fn init(fs: Arc<dyn Fs>, cx: &mut App) {
|
||||
fn init(cx: &mut App) {
|
||||
let executor = cx.background_executor().clone();
|
||||
let database_future = executor
|
||||
.spawn({
|
||||
let executor = executor.clone();
|
||||
let threads_dir = paths::data_dir().join("threads");
|
||||
async move { ThreadsDatabase::new(fs, threads_dir, executor).await }
|
||||
async move { ThreadsDatabase::new(threads_dir, executor) }
|
||||
})
|
||||
.then(|result| future::ready(result.map(Arc::new).map_err(Arc::new)))
|
||||
.boxed()
|
||||
@@ -886,17 +885,13 @@ impl ThreadsDatabase {
|
||||
cx.set_global(GlobalThreadsDatabase(database_future));
|
||||
}
|
||||
|
||||
pub async fn new(
|
||||
fs: Arc<dyn Fs>,
|
||||
threads_dir: PathBuf,
|
||||
executor: BackgroundExecutor,
|
||||
) -> Result<Self> {
|
||||
fs.create_dir(&threads_dir).await?;
|
||||
pub fn new(threads_dir: PathBuf, executor: BackgroundExecutor) -> Result<Self> {
|
||||
std::fs::create_dir_all(&threads_dir)?;
|
||||
|
||||
let sqlite_path = threads_dir.join("threads.db");
|
||||
let mdb_path = threads_dir.join("threads-db.1.mdb");
|
||||
|
||||
let needs_migration_from_heed = fs.is_file(&mdb_path).await;
|
||||
let needs_migration_from_heed = mdb_path.exists();
|
||||
|
||||
let connection = if *ZED_STATELESS {
|
||||
Connection::open_memory(Some("THREAD_FALLBACK_DB"))
|
||||
@@ -938,14 +933,7 @@ impl ThreadsDatabase {
|
||||
.spawn(async move {
|
||||
log::info!("Starting threads.db migration");
|
||||
Self::migrate_from_heed(&mdb_path, db_connection, executor_clone)?;
|
||||
fs.remove_dir(
|
||||
&mdb_path,
|
||||
RemoveOptions {
|
||||
recursive: true,
|
||||
ignore_if_not_exists: true,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
std::fs::remove_dir_all(mdb_path)?;
|
||||
log::info!("threads.db migrated to sqlite");
|
||||
Ok::<(), anyhow::Error>(())
|
||||
})
|
||||
|
||||
@@ -48,6 +48,7 @@ log.workspace = true
|
||||
open.workspace = true
|
||||
parking_lot.workspace = true
|
||||
paths.workspace = true
|
||||
portable-pty.workspace = true
|
||||
project.workspace = true
|
||||
prompt_store.workspace = true
|
||||
rust-embed.workspace = true
|
||||
@@ -67,8 +68,8 @@ util.workspace = true
|
||||
uuid.workspace = true
|
||||
watch.workspace = true
|
||||
web_search.workspace = true
|
||||
which.workspace = true
|
||||
workspace-hack.workspace = true
|
||||
zed_env_vars.workspace = true
|
||||
zstd.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -2,15 +2,15 @@ use crate::{
|
||||
ContextServerRegistry, Thread, ThreadEvent, ThreadsDatabase, ToolCallAuthorization,
|
||||
UserMessageContent, templates::Templates,
|
||||
};
|
||||
use crate::{HistoryStore, TerminalHandle, ThreadEnvironment, TitleUpdated, TokenUsageUpdated};
|
||||
use crate::{HistoryStore, TitleUpdated, TokenUsageUpdated};
|
||||
use acp_thread::{AcpThread, AgentModelSelector};
|
||||
use action_log::ActionLog;
|
||||
use agent_client_protocol as acp;
|
||||
use agent_settings::AgentSettings;
|
||||
use anyhow::{Context as _, Result, anyhow};
|
||||
use collections::{HashSet, IndexMap};
|
||||
use fs::Fs;
|
||||
use futures::channel::{mpsc, oneshot};
|
||||
use futures::future::Shared;
|
||||
use futures::channel::mpsc;
|
||||
use futures::{StreamExt, future};
|
||||
use gpui::{
|
||||
App, AppContext, AsyncApp, Context, Entity, SharedString, Subscription, Task, WeakEntity,
|
||||
@@ -20,14 +20,13 @@ use project::{Project, ProjectItem, ProjectPath, Worktree};
|
||||
use prompt_store::{
|
||||
ProjectContext, PromptId, PromptStore, RulesFileContext, UserRulesContext, WorktreeContext,
|
||||
};
|
||||
use settings::{LanguageModelSelection, update_settings_file};
|
||||
use settings::update_settings_file;
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::Path;
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
use util::ResultExt;
|
||||
use util::rel_path::RelPath;
|
||||
|
||||
const RULES_FILE_NAMES: [&str; 9] = [
|
||||
".rules",
|
||||
@@ -57,7 +56,7 @@ struct Session {
|
||||
|
||||
pub struct LanguageModels {
|
||||
/// Access language model by ID
|
||||
models: HashMap<acp::ModelId, Arc<dyn LanguageModel>>,
|
||||
models: HashMap<acp_thread::AgentModelId, Arc<dyn LanguageModel>>,
|
||||
/// Cached list for returning language model information
|
||||
model_list: acp_thread::AgentModelList,
|
||||
refresh_models_rx: watch::Receiver<()>,
|
||||
@@ -133,7 +132,10 @@ impl LanguageModels {
|
||||
self.refresh_models_rx.clone()
|
||||
}
|
||||
|
||||
pub fn model_from_id(&self, model_id: &acp::ModelId) -> Option<Arc<dyn LanguageModel>> {
|
||||
pub fn model_from_id(
|
||||
&self,
|
||||
model_id: &acp_thread::AgentModelId,
|
||||
) -> Option<Arc<dyn LanguageModel>> {
|
||||
self.models.get(model_id).cloned()
|
||||
}
|
||||
|
||||
@@ -144,13 +146,12 @@ impl LanguageModels {
|
||||
acp_thread::AgentModelInfo {
|
||||
id: Self::model_id(model),
|
||||
name: model.name().0,
|
||||
description: None,
|
||||
icon: Some(provider.icon()),
|
||||
}
|
||||
}
|
||||
|
||||
fn model_id(model: &Arc<dyn LanguageModel>) -> acp::ModelId {
|
||||
acp::ModelId(format!("{}/{}", model.provider_id().0, model.id().0).into())
|
||||
fn model_id(model: &Arc<dyn LanguageModel>) -> acp_thread::AgentModelId {
|
||||
acp_thread::AgentModelId(format!("{}/{}", model.provider_id().0, model.id().0).into())
|
||||
}
|
||||
|
||||
fn authenticate_all_language_model_providers(cx: &mut App) -> Task<()> {
|
||||
@@ -164,41 +165,33 @@ impl LanguageModels {
|
||||
cx.background_spawn(async move {
|
||||
for (provider_id, provider_name, authenticate_task) in authenticate_all_providers {
|
||||
if let Err(err) = authenticate_task.await {
|
||||
match err {
|
||||
language_model::AuthenticateError::CredentialsNotFound => {
|
||||
// Since we're authenticating these providers in the
|
||||
// background for the purposes of populating the
|
||||
// language selector, we don't care about providers
|
||||
// where the credentials are not found.
|
||||
}
|
||||
language_model::AuthenticateError::ConnectionRefused => {
|
||||
// Not logging connection refused errors as they are mostly from LM Studio's noisy auth failures.
|
||||
// LM Studio only has one auth method (endpoint call) which fails for users who haven't enabled it.
|
||||
// TODO: Better manage LM Studio auth logic to avoid these noisy failures.
|
||||
}
|
||||
_ => {
|
||||
// Some providers have noisy failure states that we
|
||||
// don't want to spam the logs with every time the
|
||||
// language model selector is initialized.
|
||||
//
|
||||
// Ideally these should have more clear failure modes
|
||||
// that we know are safe to ignore here, like what we do
|
||||
// with `CredentialsNotFound` above.
|
||||
match provider_id.0.as_ref() {
|
||||
"lmstudio" | "ollama" => {
|
||||
// LM Studio and Ollama both make fetch requests to the local APIs to determine if they are "authenticated".
|
||||
//
|
||||
// These fail noisily, so we don't log them.
|
||||
}
|
||||
"copilot_chat" => {
|
||||
// Copilot Chat returns an error if Copilot is not enabled, so we don't log those errors.
|
||||
}
|
||||
_ => {
|
||||
log::error!(
|
||||
"Failed to authenticate provider: {}: {err}",
|
||||
provider_name.0
|
||||
);
|
||||
}
|
||||
if matches!(err, language_model::AuthenticateError::CredentialsNotFound) {
|
||||
// Since we're authenticating these providers in the
|
||||
// background for the purposes of populating the
|
||||
// language selector, we don't care about providers
|
||||
// where the credentials are not found.
|
||||
} else {
|
||||
// Some providers have noisy failure states that we
|
||||
// don't want to spam the logs with every time the
|
||||
// language model selector is initialized.
|
||||
//
|
||||
// Ideally these should have more clear failure modes
|
||||
// that we know are safe to ignore here, like what we do
|
||||
// with `CredentialsNotFound` above.
|
||||
match provider_id.0.as_ref() {
|
||||
"lmstudio" | "ollama" => {
|
||||
// LM Studio and Ollama both make fetch requests to the local APIs to determine if they are "authenticated".
|
||||
//
|
||||
// These fail noisily, so we don't log them.
|
||||
}
|
||||
"copilot_chat" => {
|
||||
// Copilot Chat returns an error if Copilot is not enabled, so we don't log those errors.
|
||||
}
|
||||
_ => {
|
||||
log::error!(
|
||||
"Failed to authenticate provider: {}: {err}",
|
||||
provider_name.0
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -283,6 +276,13 @@ impl NativeAgent {
|
||||
cx: &mut Context<Self>,
|
||||
) -> Entity<AcpThread> {
|
||||
let connection = Rc::new(NativeAgentConnection(cx.entity()));
|
||||
let registry = LanguageModelRegistry::read_global(cx);
|
||||
let summarization_model = registry.thread_summary_model().map(|c| c.model);
|
||||
|
||||
thread_handle.update(cx, |thread, cx| {
|
||||
thread.set_summarization_model(summarization_model, cx);
|
||||
thread.add_default_tools(cx)
|
||||
});
|
||||
|
||||
let thread = thread_handle.read(cx);
|
||||
let session_id = thread.id().clone();
|
||||
@@ -301,20 +301,6 @@ impl NativeAgent {
|
||||
cx,
|
||||
)
|
||||
});
|
||||
|
||||
let registry = LanguageModelRegistry::read_global(cx);
|
||||
let summarization_model = registry.thread_summary_model().map(|c| c.model);
|
||||
|
||||
thread_handle.update(cx, |thread, cx| {
|
||||
thread.set_summarization_model(summarization_model, cx);
|
||||
thread.add_default_tools(
|
||||
Rc::new(AcpThreadEnvironment {
|
||||
acp_thread: acp_thread.downgrade(),
|
||||
}) as _,
|
||||
cx,
|
||||
)
|
||||
});
|
||||
|
||||
let subscriptions = vec![
|
||||
cx.observe_release(&acp_thread, |this, acp_thread, _cx| {
|
||||
this.sessions.remove(acp_thread.session_id());
|
||||
@@ -435,7 +421,7 @@ impl NativeAgent {
|
||||
cx: &mut App,
|
||||
) -> Task<(WorktreeContext, Option<RulesLoadingError>)> {
|
||||
let tree = worktree.read(cx);
|
||||
let root_name = tree.root_name_str().into();
|
||||
let root_name = tree.root_name().into();
|
||||
let abs_path = tree.abs_path();
|
||||
|
||||
let mut context = WorktreeContext {
|
||||
@@ -475,7 +461,7 @@ impl NativeAgent {
|
||||
.into_iter()
|
||||
.filter_map(|name| {
|
||||
worktree
|
||||
.entry_for_path(RelPath::unix(name).unwrap())
|
||||
.entry_for_path(name)
|
||||
.filter(|entry| entry.is_file())
|
||||
.map(|entry| entry.path.clone())
|
||||
})
|
||||
@@ -559,7 +545,7 @@ impl NativeAgent {
|
||||
if items.iter().any(|(path, _, _)| {
|
||||
RULES_FILE_NAMES
|
||||
.iter()
|
||||
.any(|name| path.as_ref() == RelPath::unix(name).unwrap())
|
||||
.any(|name| path.as_ref() == Path::new(name))
|
||||
}) {
|
||||
self.project_context_needs_refresh.send(()).ok();
|
||||
}
|
||||
@@ -753,7 +739,6 @@ impl NativeAgentConnection {
|
||||
acp::ContentBlock::Text(acp::TextContent {
|
||||
text,
|
||||
annotations: None,
|
||||
meta: None,
|
||||
}),
|
||||
false,
|
||||
cx,
|
||||
@@ -766,7 +751,6 @@ impl NativeAgentConnection {
|
||||
acp::ContentBlock::Text(acp::TextContent {
|
||||
text,
|
||||
annotations: None,
|
||||
meta: None,
|
||||
}),
|
||||
true,
|
||||
cx,
|
||||
@@ -778,17 +762,18 @@ impl NativeAgentConnection {
|
||||
options,
|
||||
response,
|
||||
}) => {
|
||||
let outcome_task = acp_thread.update(cx, |thread, cx| {
|
||||
thread.request_tool_call_authorization(
|
||||
tool_call, options, true, cx,
|
||||
)
|
||||
})??;
|
||||
let recv = acp_thread.update(cx, |thread, cx| {
|
||||
thread.request_tool_call_authorization(tool_call, options, cx)
|
||||
})?;
|
||||
cx.background_spawn(async move {
|
||||
if let acp::RequestPermissionOutcome::Selected { option_id } =
|
||||
outcome_task.await
|
||||
if let Some(recv) = recv.log_err()
|
||||
&& let Some(option) = recv
|
||||
.await
|
||||
.context("authorization sender was dropped")
|
||||
.log_err()
|
||||
{
|
||||
response
|
||||
.send(option_id)
|
||||
.send(option)
|
||||
.map(|_| anyhow!("authorization receiver was dropped"))
|
||||
.log_err();
|
||||
}
|
||||
@@ -812,10 +797,7 @@ impl NativeAgentConnection {
|
||||
}
|
||||
ThreadEvent::Stop(stop_reason) => {
|
||||
log::debug!("Assistant message complete: {:?}", stop_reason);
|
||||
return Ok(acp::PromptResponse {
|
||||
stop_reason,
|
||||
meta: None,
|
||||
});
|
||||
return Ok(acp::PromptResponse { stop_reason });
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -829,21 +811,15 @@ impl NativeAgentConnection {
|
||||
log::debug!("Response stream completed");
|
||||
anyhow::Ok(acp::PromptResponse {
|
||||
stop_reason: acp::StopReason::EndTurn,
|
||||
meta: None,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct NativeAgentModelSelector {
|
||||
session_id: acp::SessionId,
|
||||
connection: NativeAgentConnection,
|
||||
}
|
||||
|
||||
impl acp_thread::AgentModelSelector for NativeAgentModelSelector {
|
||||
impl AgentModelSelector for NativeAgentConnection {
|
||||
fn list_models(&self, cx: &mut App) -> Task<Result<acp_thread::AgentModelList>> {
|
||||
log::debug!("NativeAgentConnection::list_models called");
|
||||
let list = self.connection.0.read(cx).models.model_list.clone();
|
||||
let list = self.0.read(cx).models.model_list.clone();
|
||||
Task::ready(if list.is_empty() {
|
||||
Err(anyhow::anyhow!("No models available"))
|
||||
} else {
|
||||
@@ -851,24 +827,24 @@ impl acp_thread::AgentModelSelector for NativeAgentModelSelector {
|
||||
})
|
||||
}
|
||||
|
||||
fn select_model(&self, model_id: acp::ModelId, cx: &mut App) -> Task<Result<()>> {
|
||||
log::debug!(
|
||||
"Setting model for session {}: {}",
|
||||
self.session_id,
|
||||
model_id
|
||||
);
|
||||
fn select_model(
|
||||
&self,
|
||||
session_id: acp::SessionId,
|
||||
model_id: acp_thread::AgentModelId,
|
||||
cx: &mut App,
|
||||
) -> Task<Result<()>> {
|
||||
log::debug!("Setting model for session {}: {}", session_id, model_id);
|
||||
let Some(thread) = self
|
||||
.connection
|
||||
.0
|
||||
.read(cx)
|
||||
.sessions
|
||||
.get(&self.session_id)
|
||||
.get(&session_id)
|
||||
.map(|session| session.thread.clone())
|
||||
else {
|
||||
return Task::ready(Err(anyhow!("Session not found")));
|
||||
};
|
||||
|
||||
let Some(model) = self.connection.0.read(cx).models.model_from_id(&model_id) else {
|
||||
let Some(model) = self.0.read(cx).models.model_from_id(&model_id) else {
|
||||
return Task::ready(Err(anyhow!("Invalid model ID {}", model_id)));
|
||||
};
|
||||
|
||||
@@ -876,32 +852,29 @@ impl acp_thread::AgentModelSelector for NativeAgentModelSelector {
|
||||
thread.set_model(model.clone(), cx);
|
||||
});
|
||||
|
||||
update_settings_file(
|
||||
self.connection.0.read(cx).fs.clone(),
|
||||
update_settings_file::<AgentSettings>(
|
||||
self.0.read(cx).fs.clone(),
|
||||
cx,
|
||||
move |settings, _cx| {
|
||||
let provider = model.provider_id().0.to_string();
|
||||
let model = model.id().0.to_string();
|
||||
settings
|
||||
.agent
|
||||
.get_or_insert_default()
|
||||
.set_model(LanguageModelSelection {
|
||||
provider: provider.into(),
|
||||
model,
|
||||
});
|
||||
settings.set_model(model);
|
||||
},
|
||||
);
|
||||
|
||||
Task::ready(Ok(()))
|
||||
}
|
||||
|
||||
fn selected_model(&self, cx: &mut App) -> Task<Result<acp_thread::AgentModelInfo>> {
|
||||
fn selected_model(
|
||||
&self,
|
||||
session_id: &acp::SessionId,
|
||||
cx: &mut App,
|
||||
) -> Task<Result<acp_thread::AgentModelInfo>> {
|
||||
let session_id = session_id.clone();
|
||||
|
||||
let Some(thread) = self
|
||||
.connection
|
||||
.0
|
||||
.read(cx)
|
||||
.sessions
|
||||
.get(&self.session_id)
|
||||
.get(&session_id)
|
||||
.map(|session| session.thread.clone())
|
||||
else {
|
||||
return Task::ready(Err(anyhow!("Session not found")));
|
||||
@@ -918,8 +891,8 @@ impl acp_thread::AgentModelSelector for NativeAgentModelSelector {
|
||||
)))
|
||||
}
|
||||
|
||||
fn watch(&self, cx: &mut App) -> Option<watch::Receiver<()>> {
|
||||
Some(self.connection.0.read(cx).models.watch())
|
||||
fn watch(&self, cx: &mut App) -> watch::Receiver<()> {
|
||||
self.0.read(cx).models.watch()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -975,11 +948,8 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
|
||||
Task::ready(Ok(()))
|
||||
}
|
||||
|
||||
fn model_selector(&self, session_id: &acp::SessionId) -> Option<Rc<dyn AgentModelSelector>> {
|
||||
Some(Rc::new(NativeAgentModelSelector {
|
||||
session_id: session_id.clone(),
|
||||
connection: self.clone(),
|
||||
}) as Rc<dyn AgentModelSelector>)
|
||||
fn model_selector(&self) -> Option<Rc<dyn AgentModelSelector>> {
|
||||
Some(Rc::new(self.clone()) as Rc<dyn AgentModelSelector>)
|
||||
}
|
||||
|
||||
fn prompt(
|
||||
@@ -1034,7 +1004,7 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
|
||||
) -> Option<Rc<dyn acp_thread::AgentSessionTruncate>> {
|
||||
self.0.read_with(cx, |agent, _cx| {
|
||||
agent.sessions.get(session_id).map(|session| {
|
||||
Rc::new(NativeAgentSessionTruncate {
|
||||
Rc::new(NativeAgentSessionEditor {
|
||||
thread: session.thread.clone(),
|
||||
acp_thread: session.acp_thread.clone(),
|
||||
}) as _
|
||||
@@ -1057,6 +1027,19 @@ impl acp_thread::AgentConnection for NativeAgentConnection {
|
||||
Some(Rc::new(self.clone()) as Rc<dyn acp_thread::AgentTelemetry>)
|
||||
}
|
||||
|
||||
fn list_commands(&self, session_id: &acp::SessionId, _cx: &mut App) -> Task<Result<acp::ListCommandsResponse>> {
|
||||
// Native agent doesn't support custom commands yet
|
||||
let _session_id = session_id.clone();
|
||||
Task::ready(Ok(acp::ListCommandsResponse {
|
||||
commands: vec![],
|
||||
}))
|
||||
}
|
||||
|
||||
fn run_command(&self, _request: acp::RunCommandRequest, _cx: &mut App) -> Task<Result<()>> {
|
||||
// Native agent doesn't support custom commands yet
|
||||
Task::ready(Err(anyhow!("Custom commands not supported")))
|
||||
}
|
||||
|
||||
fn into_any(self: Rc<Self>) -> Rc<dyn Any> {
|
||||
self
|
||||
}
|
||||
@@ -1083,12 +1066,12 @@ impl acp_thread::AgentTelemetry for NativeAgentConnection {
|
||||
}
|
||||
}
|
||||
|
||||
struct NativeAgentSessionTruncate {
|
||||
struct NativeAgentSessionEditor {
|
||||
thread: Entity<Thread>,
|
||||
acp_thread: WeakEntity<AcpThread>,
|
||||
}
|
||||
|
||||
impl acp_thread::AgentSessionTruncate for NativeAgentSessionTruncate {
|
||||
impl acp_thread::AgentSessionTruncate for NativeAgentSessionEditor {
|
||||
fn run(&self, message_id: acp_thread::UserMessageId, cx: &mut App) -> Task<Result<()>> {
|
||||
match self.thread.update(cx, |thread, cx| {
|
||||
thread.truncate(message_id.clone(), cx)?;
|
||||
@@ -1137,79 +1120,21 @@ impl acp_thread::AgentSessionSetTitle for NativeAgentSessionSetTitle {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct AcpThreadEnvironment {
|
||||
acp_thread: WeakEntity<AcpThread>,
|
||||
}
|
||||
|
||||
impl ThreadEnvironment for AcpThreadEnvironment {
|
||||
fn create_terminal(
|
||||
&self,
|
||||
command: String,
|
||||
cwd: Option<PathBuf>,
|
||||
output_byte_limit: Option<u64>,
|
||||
cx: &mut AsyncApp,
|
||||
) -> Task<Result<Rc<dyn TerminalHandle>>> {
|
||||
let task = self.acp_thread.update(cx, |thread, cx| {
|
||||
thread.create_terminal(command, vec![], vec![], cwd, output_byte_limit, cx)
|
||||
});
|
||||
|
||||
let acp_thread = self.acp_thread.clone();
|
||||
cx.spawn(async move |cx| {
|
||||
let terminal = task?.await?;
|
||||
|
||||
let (drop_tx, drop_rx) = oneshot::channel();
|
||||
let terminal_id = terminal.read_with(cx, |terminal, _cx| terminal.id().clone())?;
|
||||
|
||||
cx.spawn(async move |cx| {
|
||||
drop_rx.await.ok();
|
||||
acp_thread.update(cx, |thread, cx| thread.release_terminal(terminal_id, cx))
|
||||
})
|
||||
.detach();
|
||||
|
||||
let handle = AcpTerminalHandle {
|
||||
terminal,
|
||||
_drop_tx: Some(drop_tx),
|
||||
};
|
||||
|
||||
Ok(Rc::new(handle) as _)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct AcpTerminalHandle {
|
||||
terminal: Entity<acp_thread::Terminal>,
|
||||
_drop_tx: Option<oneshot::Sender<()>>,
|
||||
}
|
||||
|
||||
impl TerminalHandle for AcpTerminalHandle {
|
||||
fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId> {
|
||||
self.terminal.read_with(cx, |term, _cx| term.id().clone())
|
||||
}
|
||||
|
||||
fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>> {
|
||||
self.terminal
|
||||
.read_with(cx, |term, _cx| term.wait_for_exit())
|
||||
}
|
||||
|
||||
fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse> {
|
||||
self.terminal
|
||||
.read_with(cx, |term, cx| term.current_output(cx))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::HistoryEntryId;
|
||||
|
||||
use super::*;
|
||||
use acp_thread::{AgentConnection, AgentModelGroupName, AgentModelInfo, MentionUri};
|
||||
use acp_thread::{
|
||||
AgentConnection, AgentModelGroupName, AgentModelId, AgentModelInfo, MentionUri,
|
||||
};
|
||||
use fs::FakeFs;
|
||||
use gpui::TestAppContext;
|
||||
use indoc::formatdoc;
|
||||
use indoc::indoc;
|
||||
use language_model::fake_provider::FakeLanguageModel;
|
||||
use serde_json::json;
|
||||
use settings::SettingsStore;
|
||||
use util::{path, rel_path::rel_path};
|
||||
use util::path;
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_maintaining_project_context(cx: &mut TestAppContext) {
|
||||
@@ -1259,17 +1184,14 @@ mod tests {
|
||||
fs.insert_file("/a/.rules", Vec::new()).await;
|
||||
cx.run_until_parked();
|
||||
agent.read_with(cx, |agent, cx| {
|
||||
let rules_entry = worktree
|
||||
.read(cx)
|
||||
.entry_for_path(rel_path(".rules"))
|
||||
.unwrap();
|
||||
let rules_entry = worktree.read(cx).entry_for_path(".rules").unwrap();
|
||||
assert_eq!(
|
||||
agent.project_context.read(cx).worktrees,
|
||||
vec![WorktreeContext {
|
||||
root_name: "a".into(),
|
||||
abs_path: Path::new("/a").into(),
|
||||
rules_file: Some(RulesFileContext {
|
||||
path_in_worktree: rel_path(".rules").into(),
|
||||
path_in_worktree: Path::new(".rules").into(),
|
||||
text: "".into(),
|
||||
project_entry_id: rules_entry.id.to_usize()
|
||||
})
|
||||
@@ -1299,25 +1221,7 @@ mod tests {
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
// Create a thread/session
|
||||
let acp_thread = cx
|
||||
.update(|cx| {
|
||||
Rc::new(connection.clone()).new_thread(project.clone(), Path::new("/a"), cx)
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let session_id = cx.update(|cx| acp_thread.read(cx).session_id().clone());
|
||||
|
||||
let models = cx
|
||||
.update(|cx| {
|
||||
connection
|
||||
.model_selector(&session_id)
|
||||
.unwrap()
|
||||
.list_models(cx)
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
let models = cx.update(|cx| connection.list_models(cx)).await.unwrap();
|
||||
|
||||
let acp_thread::AgentModelList::Grouped(models) = models else {
|
||||
panic!("Unexpected model group");
|
||||
@@ -1327,9 +1231,8 @@ mod tests {
|
||||
IndexMap::from_iter([(
|
||||
AgentModelGroupName("Fake".into()),
|
||||
vec![AgentModelInfo {
|
||||
id: acp::ModelId("fake/fake".into()),
|
||||
id: AgentModelId("fake/fake".into()),
|
||||
name: "Fake".into(),
|
||||
description: None,
|
||||
icon: Some(ui::IconName::ZedAssistant),
|
||||
}]
|
||||
)])
|
||||
@@ -1386,9 +1289,8 @@ mod tests {
|
||||
let session_id = cx.update(|cx| acp_thread.read(cx).session_id().clone());
|
||||
|
||||
// Select a model
|
||||
let selector = connection.model_selector(&session_id).unwrap();
|
||||
let model_id = acp::ModelId("fake/fake".into());
|
||||
cx.update(|cx| selector.select_model(model_id.clone(), cx))
|
||||
let model_id = AgentModelId("fake/fake".into());
|
||||
cx.update(|cx| connection.select_model(session_id.clone(), model_id.clone(), cx))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -1418,6 +1320,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[gpui::test]
|
||||
#[cfg_attr(target_os = "windows", ignore)] // TODO: Fix this test on Windows
|
||||
async fn test_save_load_thread(cx: &mut TestAppContext) {
|
||||
init_test(cx);
|
||||
let fs = FakeFs::new(cx.executor());
|
||||
@@ -1484,7 +1387,6 @@ mod tests {
|
||||
mime_type: None,
|
||||
size: None,
|
||||
title: None,
|
||||
meta: None,
|
||||
}),
|
||||
" mean?".into(),
|
||||
],
|
||||
@@ -1497,22 +1399,17 @@ mod tests {
|
||||
model.send_last_completion_stream_text_chunk("Lorem.");
|
||||
model.end_last_completion_stream();
|
||||
cx.run_until_parked();
|
||||
summary_model
|
||||
.send_last_completion_stream_text_chunk(&format!("Explaining {}", path!("/a/b.md")));
|
||||
summary_model.send_last_completion_stream_text_chunk("Explaining /a/b.md");
|
||||
summary_model.end_last_completion_stream();
|
||||
|
||||
send.await.unwrap();
|
||||
let uri = MentionUri::File {
|
||||
abs_path: path!("/a/b.md").into(),
|
||||
}
|
||||
.to_uri();
|
||||
acp_thread.read_with(cx, |thread, cx| {
|
||||
assert_eq!(
|
||||
thread.to_markdown(cx),
|
||||
formatdoc! {"
|
||||
indoc! {"
|
||||
## User
|
||||
|
||||
What does [@b.md]({uri}) mean?
|
||||
What does [@b.md](file:///a/b.md) mean?
|
||||
|
||||
## Assistant
|
||||
|
||||
@@ -1538,7 +1435,7 @@ mod tests {
|
||||
history_entries(&history_store, cx),
|
||||
vec![(
|
||||
HistoryEntryId::AcpThread(session_id.clone()),
|
||||
format!("Explaining {}", path!("/a/b.md"))
|
||||
"Explaining /a/b.md".into()
|
||||
)]
|
||||
);
|
||||
let acp_thread = agent
|
||||
@@ -1548,10 +1445,10 @@ mod tests {
|
||||
acp_thread.read_with(cx, |thread, cx| {
|
||||
assert_eq!(
|
||||
thread.to_markdown(cx),
|
||||
formatdoc! {"
|
||||
indoc! {"
|
||||
## User
|
||||
|
||||
What does [@b.md]({uri}) mean?
|
||||
What does [@b.md](file:///a/b.md) mean?
|
||||
|
||||
## Assistant
|
||||
|
||||
|
||||
@@ -18,7 +18,6 @@ use sqlez::{
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use ui::{App, SharedString};
|
||||
use zed_env_vars::ZED_STATELESS;
|
||||
|
||||
pub type DbMessage = crate::Message;
|
||||
pub type DbSummary = DetailedSummaryState;
|
||||
@@ -202,6 +201,9 @@ impl DbThread {
|
||||
}
|
||||
}
|
||||
|
||||
pub static ZED_STATELESS: std::sync::LazyLock<bool> =
|
||||
std::sync::LazyLock::new(|| std::env::var("ZED_STATELESS").is_ok_and(|v| !v.is_empty()));
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum DataType {
|
||||
#[serde(rename = "json")]
|
||||
@@ -422,7 +424,7 @@ mod tests {
|
||||
use agent::MessageSegment;
|
||||
use agent::context::LoadedContext;
|
||||
use client::Client;
|
||||
use fs::{FakeFs, Fs};
|
||||
use fs::FakeFs;
|
||||
use gpui::AppContext;
|
||||
use gpui::TestAppContext;
|
||||
use http_client::FakeHttpClient;
|
||||
@@ -430,7 +432,7 @@ mod tests {
|
||||
use project::Project;
|
||||
use settings::SettingsStore;
|
||||
|
||||
fn init_test(fs: Arc<dyn Fs>, cx: &mut TestAppContext) {
|
||||
fn init_test(cx: &mut TestAppContext) {
|
||||
env_logger::try_init().ok();
|
||||
cx.update(|cx| {
|
||||
let settings_store = SettingsStore::test(cx);
|
||||
@@ -441,7 +443,7 @@ mod tests {
|
||||
let http_client = FakeHttpClient::with_404_response();
|
||||
let clock = Arc::new(clock::FakeSystemClock::new());
|
||||
let client = Client::new(clock, http_client, cx);
|
||||
agent::init(fs, cx);
|
||||
agent::init(cx);
|
||||
agent_settings::init(cx);
|
||||
language_model::init(client, cx);
|
||||
});
|
||||
@@ -449,8 +451,8 @@ mod tests {
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_retrieving_old_thread(cx: &mut TestAppContext) {
|
||||
init_test(cx);
|
||||
let fs = FakeFs::new(cx.executor());
|
||||
init_test(fs.clone(), cx);
|
||||
let project = Project::test(fs, [], cx).await;
|
||||
|
||||
// Save a thread using the old agent.
|
||||
|
||||
@@ -262,7 +262,7 @@ impl HistoryStore {
|
||||
.iter()
|
||||
.filter_map(|entry| match entry {
|
||||
HistoryEntryId::TextThread(path) => path.file_name().map(|file| {
|
||||
SerializedRecentOpen::TextThread(file.to_string_lossy().into_owned())
|
||||
SerializedRecentOpen::TextThread(file.to_string_lossy().to_string())
|
||||
}),
|
||||
HistoryEntryId::AcpThread(id) => {
|
||||
Some(SerializedRecentOpen::AcpThread(id.to_string()))
|
||||
|
||||
@@ -35,15 +35,10 @@ impl AgentServer for NativeAgentServer {
|
||||
|
||||
fn connect(
|
||||
&self,
|
||||
_root_dir: Option<&Path>,
|
||||
_root_dir: &Path,
|
||||
delegate: AgentServerDelegate,
|
||||
cx: &mut App,
|
||||
) -> Task<
|
||||
Result<(
|
||||
Rc<dyn acp_thread::AgentConnection>,
|
||||
Option<task::SpawnInTerminal>,
|
||||
)>,
|
||||
> {
|
||||
) -> Task<Result<Rc<dyn acp_thread::AgentConnection>>> {
|
||||
log::debug!(
|
||||
"NativeAgentServer::connect called for path: {:?}",
|
||||
_root_dir
|
||||
@@ -65,10 +60,7 @@ impl AgentServer for NativeAgentServer {
|
||||
let connection = NativeAgentConnection(agent);
|
||||
log::debug!("NativeAgentServer connection established successfully");
|
||||
|
||||
Ok((
|
||||
Rc::new(connection) as Rc<dyn acp_thread::AgentConnection>,
|
||||
None,
|
||||
))
|
||||
Ok(Rc::new(connection) as Rc<dyn acp_thread::AgentConnection>)
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -48,15 +48,16 @@ The one exception to this is if the user references something you don't know abo
|
||||
## Code Block Formatting
|
||||
|
||||
Whenever you mention a code block, you MUST use ONLY use the following format:
|
||||
|
||||
```path/to/Something.blah#L123-456
|
||||
(code goes here)
|
||||
```
|
||||
|
||||
The `#L123-456` means the line number range 123 through 456, and the path/to/Something.blah is a path in the project. (If there is no valid path in the project, then you can use /dev/null/path.extension for its path.) This is the ONLY valid way to format code blocks, because the Markdown parser does not understand the more common ```language syntax, or bare ``` blocks. It only understands this path-based syntax, and if the path is missing, then it will error and you will have to do it over again.
|
||||
The `#L123-456` means the line number range 123 through 456, and the path/to/Something.blah
|
||||
is a path in the project. (If there is no valid path in the project, then you can use
|
||||
/dev/null/path.extension for its path.) This is the ONLY valid way to format code blocks, because the Markdown parser
|
||||
does not understand the more common ```language syntax, or bare ``` blocks. It only
|
||||
understands this path-based syntax, and if the path is missing, then it will error and you will have to do it over again.
|
||||
Just to be really clear about this, if you ever find yourself writing three backticks followed by a language name, STOP!
|
||||
You have made a mistake. You can only ever put paths after triple backticks!
|
||||
|
||||
<example>
|
||||
Based on all the information I've gathered, here's a summary of how this system works:
|
||||
1. The README file is loaded into the system.
|
||||
@@ -73,7 +74,6 @@ This is the last header in the README.
|
||||
```
|
||||
4. Finally, it passes this information on to the next process.
|
||||
</example>
|
||||
|
||||
<example>
|
||||
In Markdown, hash marks signify headings. For example:
|
||||
```/dev/null/example.md#L1-3
|
||||
@@ -82,7 +82,6 @@ In Markdown, hash marks signify headings. For example:
|
||||
### Level 3 heading
|
||||
```
|
||||
</example>
|
||||
|
||||
Here are examples of ways you must never render code blocks:
|
||||
<bad_example_do_not_do_this>
|
||||
In Markdown, hash marks signify headings. For example:
|
||||
@@ -92,9 +91,7 @@ In Markdown, hash marks signify headings. For example:
|
||||
### Level 3 heading
|
||||
```
|
||||
</bad_example_do_not_do_this>
|
||||
|
||||
This example is unacceptable because it does not include the path.
|
||||
|
||||
<bad_example_do_not_do_this>
|
||||
In Markdown, hash marks signify headings. For example:
|
||||
```markdown
|
||||
@@ -104,15 +101,14 @@ In Markdown, hash marks signify headings. For example:
|
||||
```
|
||||
</bad_example_do_not_do_this>
|
||||
This example is unacceptable because it has the language instead of the path.
|
||||
|
||||
<bad_example_do_not_do_this>
|
||||
In Markdown, hash marks signify headings. For example:
|
||||
# Level 1 heading
|
||||
## Level 2 heading
|
||||
### Level 3 heading
|
||||
</bad_example_do_not_do_this>
|
||||
This example is unacceptable because it uses indentation to mark the code block instead of backticks with a path.
|
||||
|
||||
This example is unacceptable because it uses indentation to mark the code block
|
||||
instead of backticks with a path.
|
||||
<bad_example_do_not_do_this>
|
||||
In Markdown, hash marks signify headings. For example:
|
||||
```markdown
|
||||
|
||||
@@ -72,6 +72,7 @@ async fn test_echo(cx: &mut TestAppContext) {
|
||||
}
|
||||
|
||||
#[gpui::test]
|
||||
#[cfg_attr(target_os = "windows", ignore)] // TODO: Fix this test on Windows
|
||||
async fn test_thinking(cx: &mut TestAppContext) {
|
||||
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
|
||||
let fake_model = model.as_fake();
|
||||
@@ -949,7 +950,6 @@ async fn test_mcp_tools(cx: &mut TestAppContext) {
|
||||
paths::settings_file(),
|
||||
json!({
|
||||
"agent": {
|
||||
"always_allow_tool_actions": true,
|
||||
"profiles": {
|
||||
"test": {
|
||||
"name": "Test Profile",
|
||||
@@ -1299,7 +1299,6 @@ async fn test_cancellation(cx: &mut TestAppContext) {
|
||||
status: Some(acp::ToolCallStatus::Completed),
|
||||
..
|
||||
},
|
||||
meta: None,
|
||||
},
|
||||
)) if Some(&id) == echo_id.as_ref() => {
|
||||
echo_completed = true;
|
||||
@@ -1349,6 +1348,7 @@ async fn test_cancellation(cx: &mut TestAppContext) {
|
||||
}
|
||||
|
||||
#[gpui::test]
|
||||
#[cfg_attr(target_os = "windows", ignore)] // TODO: Fix this test on Windows
|
||||
async fn test_in_progress_send_canceled_by_next_send(cx: &mut TestAppContext) {
|
||||
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
|
||||
let fake_model = model.as_fake();
|
||||
@@ -1687,6 +1687,7 @@ async fn test_truncate_second_message(cx: &mut TestAppContext) {
|
||||
}
|
||||
|
||||
#[gpui::test]
|
||||
#[cfg_attr(target_os = "windows", ignore)] // TODO: Fix this test on Windows
|
||||
async fn test_title_generation(cx: &mut TestAppContext) {
|
||||
let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
|
||||
let fake_model = model.as_fake();
|
||||
@@ -1850,18 +1851,8 @@ async fn test_agent_connection(cx: &mut TestAppContext) {
|
||||
.unwrap();
|
||||
let connection = NativeAgentConnection(agent.clone());
|
||||
|
||||
// Create a thread using new_thread
|
||||
let connection_rc = Rc::new(connection.clone());
|
||||
let acp_thread = cx
|
||||
.update(|cx| connection_rc.new_thread(project, cwd, cx))
|
||||
.await
|
||||
.expect("new_thread should succeed");
|
||||
|
||||
// Get the session_id from the AcpThread
|
||||
let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
|
||||
|
||||
// Test model_selector returns Some
|
||||
let selector_opt = connection.model_selector(&session_id);
|
||||
let selector_opt = connection.model_selector();
|
||||
assert!(
|
||||
selector_opt.is_some(),
|
||||
"agent2 should always support ModelSelector"
|
||||
@@ -1878,16 +1869,23 @@ async fn test_agent_connection(cx: &mut TestAppContext) {
|
||||
};
|
||||
assert!(!listed_models.is_empty(), "should have at least one model");
|
||||
assert_eq!(
|
||||
listed_models[&AgentModelGroupName("Fake".into())][0]
|
||||
.id
|
||||
.0
|
||||
.as_ref(),
|
||||
listed_models[&AgentModelGroupName("Fake".into())][0].id.0,
|
||||
"fake/fake"
|
||||
);
|
||||
|
||||
// Create a thread using new_thread
|
||||
let connection_rc = Rc::new(connection.clone());
|
||||
let acp_thread = cx
|
||||
.update(|cx| connection_rc.new_thread(project, cwd, cx))
|
||||
.await
|
||||
.expect("new_thread should succeed");
|
||||
|
||||
// Get the session_id from the AcpThread
|
||||
let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
|
||||
|
||||
// Test selected_model returns the default
|
||||
let model = cx
|
||||
.update(|cx| selector.selected_model(cx))
|
||||
.update(|cx| selector.selected_model(&session_id, cx))
|
||||
.await
|
||||
.expect("selected_model should succeed");
|
||||
let model = cx
|
||||
@@ -1930,7 +1928,6 @@ async fn test_agent_connection(cx: &mut TestAppContext) {
|
||||
acp::PromptRequest {
|
||||
session_id: session_id.clone(),
|
||||
prompt: vec!["ghi".into()],
|
||||
meta: None,
|
||||
},
|
||||
cx,
|
||||
)
|
||||
@@ -1995,7 +1992,6 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
|
||||
locations: vec![],
|
||||
raw_input: Some(json!({})),
|
||||
raw_output: None,
|
||||
meta: None,
|
||||
}
|
||||
);
|
||||
let update = expect_tool_call_update_fields(&mut events).await;
|
||||
@@ -2009,7 +2005,6 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
|
||||
raw_input: Some(json!({ "content": "Thinking hard!" })),
|
||||
..Default::default()
|
||||
},
|
||||
meta: None,
|
||||
}
|
||||
);
|
||||
let update = expect_tool_call_update_fields(&mut events).await;
|
||||
@@ -2021,7 +2016,6 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
|
||||
status: Some(acp::ToolCallStatus::InProgress),
|
||||
..Default::default()
|
||||
},
|
||||
meta: None,
|
||||
}
|
||||
);
|
||||
let update = expect_tool_call_update_fields(&mut events).await;
|
||||
@@ -2033,7 +2027,6 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
|
||||
content: Some(vec!["Thinking hard!".into()]),
|
||||
..Default::default()
|
||||
},
|
||||
meta: None,
|
||||
}
|
||||
);
|
||||
let update = expect_tool_call_update_fields(&mut events).await;
|
||||
@@ -2046,7 +2039,6 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
|
||||
raw_output: Some("Finished thinking.".into()),
|
||||
..Default::default()
|
||||
},
|
||||
meta: None,
|
||||
}
|
||||
);
|
||||
}
|
||||
@@ -2360,20 +2352,15 @@ async fn setup(cx: &mut TestAppContext, model: TestModel) -> ThreadTest {
|
||||
settings::init(cx);
|
||||
Project::init_settings(cx);
|
||||
agent_settings::init(cx);
|
||||
gpui_tokio::init(cx);
|
||||
let http_client = ReqwestClient::user_agent("agent tests").unwrap();
|
||||
cx.set_http_client(Arc::new(http_client));
|
||||
|
||||
match model {
|
||||
TestModel::Fake => {}
|
||||
TestModel::Sonnet4 => {
|
||||
gpui_tokio::init(cx);
|
||||
let http_client = ReqwestClient::user_agent("agent tests").unwrap();
|
||||
cx.set_http_client(Arc::new(http_client));
|
||||
client::init_settings(cx);
|
||||
let client = Client::production(cx);
|
||||
let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
|
||||
language_model::init(client.clone(), cx);
|
||||
language_models::init(user_store, client.clone(), cx);
|
||||
}
|
||||
};
|
||||
client::init_settings(cx);
|
||||
let client = Client::production(cx);
|
||||
let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
|
||||
language_model::init(client.clone(), cx);
|
||||
language_models::init(user_store, client.clone(), cx);
|
||||
|
||||
watch_settings(fs.clone(), cx);
|
||||
});
|
||||
@@ -2487,7 +2474,6 @@ fn setup_context_server(
|
||||
path: "somebinary".into(),
|
||||
args: Vec::new(),
|
||||
env: None,
|
||||
timeout: None,
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
@@ -24,11 +24,7 @@ impl AgentTool for EchoTool {
|
||||
acp::ToolKind::Other
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
_input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
"Echo".into()
|
||||
}
|
||||
|
||||
@@ -59,11 +55,7 @@ impl AgentTool for DelayTool {
|
||||
"delay"
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
if let Ok(input) = input {
|
||||
format!("Delay {}ms", input.ms).into()
|
||||
} else {
|
||||
@@ -108,11 +100,7 @@ impl AgentTool for ToolRequiringPermission {
|
||||
acp::ToolKind::Other
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
_input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
"This tool requires permission".into()
|
||||
}
|
||||
|
||||
@@ -147,11 +135,7 @@ impl AgentTool for InfiniteTool {
|
||||
acp::ToolKind::Other
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
_input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
"Infinite Tool".into()
|
||||
}
|
||||
|
||||
@@ -202,11 +186,7 @@ impl AgentTool for WordListTool {
|
||||
acp::ToolKind::Other
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
_input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, _input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
"List of random words".into()
|
||||
}
|
||||
|
||||
|
||||
@@ -15,11 +15,10 @@ use agent_settings::{
|
||||
use anyhow::{Context as _, Result, anyhow};
|
||||
use assistant_tool::adapt_schema_to_format;
|
||||
use chrono::{DateTime, Utc};
|
||||
use client::{ModelRequestUsage, RequestUsage, UserStore};
|
||||
use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, Plan, UsageLimit};
|
||||
use client::{ModelRequestUsage, RequestUsage};
|
||||
use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit};
|
||||
use collections::{HashMap, HashSet, IndexMap};
|
||||
use fs::Fs;
|
||||
use futures::stream;
|
||||
use futures::{
|
||||
FutureExt,
|
||||
channel::{mpsc, oneshot},
|
||||
@@ -35,7 +34,7 @@ use language_model::{
|
||||
LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry, LanguageModelRequest,
|
||||
LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
|
||||
LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
|
||||
LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage, ZED_CLOUD_PROVIDER_ID,
|
||||
LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
|
||||
};
|
||||
use project::{
|
||||
Project,
|
||||
@@ -46,15 +45,14 @@ use schemars::{JsonSchema, Schema};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use settings::{Settings, update_settings_file};
|
||||
use smol::stream::StreamExt;
|
||||
use std::fmt::Write;
|
||||
use std::{
|
||||
collections::BTreeMap,
|
||||
ops::RangeInclusive,
|
||||
path::Path,
|
||||
rc::Rc,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use std::{fmt::Write, path::PathBuf};
|
||||
use util::{ResultExt, debug_panic, markdown::MarkdownCodeBlock};
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -486,15 +484,11 @@ impl AgentMessage {
|
||||
};
|
||||
|
||||
for tool_result in self.tool_results.values() {
|
||||
let mut tool_result = tool_result.clone();
|
||||
// Surprisingly, the API fails if we return an empty string here.
|
||||
// It thinks we are sending a tool use without a tool result.
|
||||
if tool_result.content.is_empty() {
|
||||
tool_result.content = "<Tool returned an empty string>".into();
|
||||
}
|
||||
user_message
|
||||
.content
|
||||
.push(language_model::MessageContent::ToolResult(tool_result));
|
||||
.push(language_model::MessageContent::ToolResult(
|
||||
tool_result.clone(),
|
||||
));
|
||||
}
|
||||
|
||||
let mut messages = Vec::new();
|
||||
@@ -525,22 +519,6 @@ pub enum AgentMessageContent {
|
||||
ToolUse(LanguageModelToolUse),
|
||||
}
|
||||
|
||||
pub trait TerminalHandle {
|
||||
fn id(&self, cx: &AsyncApp) -> Result<acp::TerminalId>;
|
||||
fn current_output(&self, cx: &AsyncApp) -> Result<acp::TerminalOutputResponse>;
|
||||
fn wait_for_exit(&self, cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>>;
|
||||
}
|
||||
|
||||
pub trait ThreadEnvironment {
|
||||
fn create_terminal(
|
||||
&self,
|
||||
command: String,
|
||||
cwd: Option<PathBuf>,
|
||||
output_byte_limit: Option<u64>,
|
||||
cx: &mut AsyncApp,
|
||||
) -> Task<Result<Rc<dyn TerminalHandle>>>;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ThreadEvent {
|
||||
UserMessage(UserMessage),
|
||||
@@ -553,14 +531,6 @@ pub enum ThreadEvent {
|
||||
Stop(acp::StopReason),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct NewTerminal {
|
||||
pub command: String,
|
||||
pub output_byte_limit: Option<u64>,
|
||||
pub cwd: Option<PathBuf>,
|
||||
pub response: oneshot::Sender<Result<Entity<acp_thread::Terminal>>>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ToolCallAuthorization {
|
||||
pub tool_call: acp::ToolCallUpdate,
|
||||
@@ -586,7 +556,6 @@ pub struct Thread {
|
||||
pending_title_generation: Option<Task<()>>,
|
||||
summary: Option<SharedString>,
|
||||
messages: Vec<Message>,
|
||||
user_store: Entity<UserStore>,
|
||||
completion_mode: CompletionMode,
|
||||
/// Holds the task that handles agent interaction until the end of the turn.
|
||||
/// Survives across multiple requests as the model performs tool calls and
|
||||
@@ -616,10 +585,10 @@ impl Thread {
|
||||
fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
|
||||
let image = model.map_or(true, |model| model.supports_images());
|
||||
acp::PromptCapabilities {
|
||||
meta: None,
|
||||
image,
|
||||
audio: false,
|
||||
embedded_context: true,
|
||||
supports_custom_commands: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -643,7 +612,6 @@ impl Thread {
|
||||
pending_title_generation: None,
|
||||
summary: None,
|
||||
messages: Vec::new(),
|
||||
user_store: project.read(cx).user_store(),
|
||||
completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
|
||||
running_turn: None,
|
||||
pending_message: None,
|
||||
@@ -732,7 +700,6 @@ impl Thread {
|
||||
stream
|
||||
.0
|
||||
.unbounded_send(Ok(ThreadEvent::ToolCall(acp::ToolCall {
|
||||
meta: None,
|
||||
id: acp::ToolCallId(tool_use.id.to_string().into()),
|
||||
title: tool_use.name.to_string(),
|
||||
kind: acp::ToolKind::Other,
|
||||
@@ -746,7 +713,7 @@ impl Thread {
|
||||
return;
|
||||
};
|
||||
|
||||
let title = tool.initial_title(tool_use.input.clone(), cx);
|
||||
let title = tool.initial_title(tool_use.input.clone());
|
||||
let kind = tool.kind();
|
||||
stream.send_tool_call(&tool_use.id, title, kind, tool_use.input.clone());
|
||||
|
||||
@@ -823,7 +790,6 @@ impl Thread {
|
||||
pending_title_generation: None,
|
||||
summary: db_thread.detailed_summary,
|
||||
messages: db_thread.messages,
|
||||
user_store: project.read(cx).user_store(),
|
||||
completion_mode: db_thread.completion_mode.unwrap_or_default(),
|
||||
running_turn: None,
|
||||
pending_message: None,
|
||||
@@ -883,11 +849,27 @@ impl Thread {
|
||||
.map(|worktree| Self::worktree_snapshot(worktree, git_store.clone(), cx))
|
||||
.collect();
|
||||
|
||||
cx.spawn(async move |_, _| {
|
||||
cx.spawn(async move |_, cx| {
|
||||
let worktree_snapshots = futures::future::join_all(worktree_snapshots).await;
|
||||
|
||||
let mut unsaved_buffers = Vec::new();
|
||||
cx.update(|app_cx| {
|
||||
let buffer_store = project.read(app_cx).buffer_store();
|
||||
for buffer_handle in buffer_store.read(app_cx).buffers() {
|
||||
let buffer = buffer_handle.read(app_cx);
|
||||
if buffer.is_dirty()
|
||||
&& let Some(file) = buffer.file()
|
||||
{
|
||||
let path = file.path().to_string_lossy().to_string();
|
||||
unsaved_buffers.push(path);
|
||||
}
|
||||
}
|
||||
})
|
||||
.ok();
|
||||
|
||||
Arc::new(ProjectSnapshot {
|
||||
worktree_snapshots,
|
||||
unsaved_buffer_paths: unsaved_buffers,
|
||||
timestamp: Utc::now(),
|
||||
})
|
||||
})
|
||||
@@ -902,7 +884,7 @@ impl Thread {
|
||||
// Get worktree path and snapshot
|
||||
let worktree_info = cx.update(|app_cx| {
|
||||
let worktree = worktree.read(app_cx);
|
||||
let path = worktree.abs_path().to_string_lossy().into_owned();
|
||||
let path = worktree.abs_path().to_string_lossy().to_string();
|
||||
let snapshot = worktree.snapshot();
|
||||
(path, snapshot)
|
||||
});
|
||||
@@ -1039,11 +1021,7 @@ impl Thread {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_default_tools(
|
||||
&mut self,
|
||||
environment: Rc<dyn ThreadEnvironment>,
|
||||
cx: &mut Context<Self>,
|
||||
) {
|
||||
pub fn add_default_tools(&mut self, cx: &mut Context<Self>) {
|
||||
let language_registry = self.project.read(cx).languages().clone();
|
||||
self.add_tool(CopyPathTool::new(self.project.clone()));
|
||||
self.add_tool(CreateDirectoryTool::new(self.project.clone()));
|
||||
@@ -1052,11 +1030,7 @@ impl Thread {
|
||||
self.action_log.clone(),
|
||||
));
|
||||
self.add_tool(DiagnosticsTool::new(self.project.clone()));
|
||||
self.add_tool(EditFileTool::new(
|
||||
self.project.clone(),
|
||||
cx.weak_entity(),
|
||||
language_registry,
|
||||
));
|
||||
self.add_tool(EditFileTool::new(cx.weak_entity(), language_registry));
|
||||
self.add_tool(FetchTool::new(self.project.read(cx).client().http_client()));
|
||||
self.add_tool(FindPathTool::new(self.project.clone()));
|
||||
self.add_tool(GrepTool::new(self.project.clone()));
|
||||
@@ -1068,7 +1042,7 @@ impl Thread {
|
||||
self.project.clone(),
|
||||
self.action_log.clone(),
|
||||
));
|
||||
self.add_tool(TerminalTool::new(self.project.clone(), environment));
|
||||
self.add_tool(TerminalTool::new(self.project.clone(), cx));
|
||||
self.add_tool(ThinkingTool);
|
||||
self.add_tool(WebSearchTool);
|
||||
}
|
||||
@@ -1253,12 +1227,12 @@ impl Thread {
|
||||
);
|
||||
|
||||
log::debug!("Calling model.stream_completion, attempt {}", attempt);
|
||||
|
||||
let (mut events, mut error) = match model.stream_completion(request, cx).await {
|
||||
Ok(events) => (events, None),
|
||||
Err(err) => (stream::empty().boxed(), Some(err)),
|
||||
};
|
||||
let mut events = model
|
||||
.stream_completion(request, cx)
|
||||
.await
|
||||
.map_err(|error| anyhow!(error))?;
|
||||
let mut tool_results = FuturesUnordered::new();
|
||||
let mut error = None;
|
||||
while let Some(event) = events.next().await {
|
||||
log::trace!("Received completion event: {:?}", event);
|
||||
match event {
|
||||
@@ -1306,10 +1280,8 @@ impl Thread {
|
||||
|
||||
if let Some(error) = error {
|
||||
attempt += 1;
|
||||
let retry = this.update(cx, |this, cx| {
|
||||
let user_store = this.user_store.read(cx);
|
||||
this.handle_completion_error(error, attempt, user_store.plan())
|
||||
})??;
|
||||
let retry =
|
||||
this.update(cx, |this, _| this.handle_completion_error(error, attempt))??;
|
||||
let timer = cx.background_executor().timer(retry.duration);
|
||||
event_stream.send_retry(retry);
|
||||
timer.await;
|
||||
@@ -1336,23 +1308,8 @@ impl Thread {
|
||||
&mut self,
|
||||
error: LanguageModelCompletionError,
|
||||
attempt: u8,
|
||||
plan: Option<Plan>,
|
||||
) -> Result<acp_thread::RetryStatus> {
|
||||
let Some(model) = self.model.as_ref() else {
|
||||
return Err(anyhow!(error));
|
||||
};
|
||||
|
||||
let auto_retry = if model.provider_id() == ZED_CLOUD_PROVIDER_ID {
|
||||
match plan {
|
||||
Some(Plan::V2(_)) => true,
|
||||
Some(Plan::V1(_)) => self.completion_mode == CompletionMode::Burn,
|
||||
None => false,
|
||||
}
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
if !auto_retry {
|
||||
if self.completion_mode == CompletionMode::Normal {
|
||||
return Err(anyhow!(error));
|
||||
}
|
||||
|
||||
@@ -1525,7 +1482,7 @@ impl Thread {
|
||||
let mut title = SharedString::from(&tool_use.name);
|
||||
let mut kind = acp::ToolKind::Other;
|
||||
if let Some(tool) = tool.as_ref() {
|
||||
title = tool.initial_title(tool_use.input.clone(), cx);
|
||||
title = tool.initial_title(tool_use.input.clone());
|
||||
kind = tool.kind();
|
||||
}
|
||||
|
||||
@@ -2159,11 +2116,7 @@ where
|
||||
fn kind() -> acp::ToolKind;
|
||||
|
||||
/// The initial tool title to display. Can be updated during the tool run.
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
cx: &mut App,
|
||||
) -> SharedString;
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString;
|
||||
|
||||
/// Returns the JSON schema that describes the tool's input.
|
||||
fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Schema {
|
||||
@@ -2211,7 +2164,7 @@ pub trait AnyAgentTool {
|
||||
fn name(&self) -> SharedString;
|
||||
fn description(&self) -> SharedString;
|
||||
fn kind(&self) -> acp::ToolKind;
|
||||
fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString;
|
||||
fn initial_title(&self, input: serde_json::Value) -> SharedString;
|
||||
fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
|
||||
fn supported_provider(&self, _provider: &LanguageModelProviderId) -> bool {
|
||||
true
|
||||
@@ -2247,9 +2200,9 @@ where
|
||||
T::kind()
|
||||
}
|
||||
|
||||
fn initial_title(&self, input: serde_json::Value, _cx: &mut App) -> SharedString {
|
||||
fn initial_title(&self, input: serde_json::Value) -> SharedString {
|
||||
let parsed_input = serde_json::from_value(input.clone()).map_err(|_| input);
|
||||
self.0.initial_title(parsed_input, _cx)
|
||||
self.0.initial_title(parsed_input)
|
||||
}
|
||||
|
||||
fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
|
||||
@@ -2340,7 +2293,6 @@ impl ThreadEventStream {
|
||||
input: serde_json::Value,
|
||||
) -> acp::ToolCall {
|
||||
acp::ToolCall {
|
||||
meta: None,
|
||||
id: acp::ToolCallId(id.to_string().into()),
|
||||
title,
|
||||
kind,
|
||||
@@ -2360,7 +2312,6 @@ impl ThreadEventStream {
|
||||
self.0
|
||||
.unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
|
||||
acp::ToolCallUpdate {
|
||||
meta: None,
|
||||
id: acp::ToolCallId(tool_use_id.to_string().into()),
|
||||
fields,
|
||||
}
|
||||
@@ -2435,6 +2386,19 @@ impl ToolCallEventStream {
|
||||
.ok();
|
||||
}
|
||||
|
||||
pub fn update_terminal(&self, terminal: Entity<acp_thread::Terminal>) {
|
||||
self.stream
|
||||
.0
|
||||
.unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
|
||||
acp_thread::ToolCallUpdateTerminal {
|
||||
id: acp::ToolCallId(self.tool_use_id.to_string().into()),
|
||||
terminal,
|
||||
}
|
||||
.into(),
|
||||
)))
|
||||
.ok();
|
||||
}
|
||||
|
||||
pub fn authorize(&self, title: impl Into<String>, cx: &mut App) -> Task<Result<()>> {
|
||||
if agent_settings::AgentSettings::get_global(cx).always_allow_tool_actions {
|
||||
return Task::ready(Ok(()));
|
||||
@@ -2446,7 +2410,6 @@ impl ToolCallEventStream {
|
||||
.unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
|
||||
ToolCallAuthorization {
|
||||
tool_call: acp::ToolCallUpdate {
|
||||
meta: None,
|
||||
id: acp::ToolCallId(self.tool_use_id.to_string().into()),
|
||||
fields: acp::ToolCallUpdateFields {
|
||||
title: Some(title.into()),
|
||||
@@ -2458,19 +2421,16 @@ impl ToolCallEventStream {
|
||||
id: acp::PermissionOptionId("always_allow".into()),
|
||||
name: "Always Allow".into(),
|
||||
kind: acp::PermissionOptionKind::AllowAlways,
|
||||
meta: None,
|
||||
},
|
||||
acp::PermissionOption {
|
||||
id: acp::PermissionOptionId("allow".into()),
|
||||
name: "Allow".into(),
|
||||
kind: acp::PermissionOptionKind::AllowOnce,
|
||||
meta: None,
|
||||
},
|
||||
acp::PermissionOption {
|
||||
id: acp::PermissionOptionId("deny".into()),
|
||||
name: "Deny".into(),
|
||||
kind: acp::PermissionOptionKind::RejectOnce,
|
||||
meta: None,
|
||||
},
|
||||
],
|
||||
response: response_tx,
|
||||
@@ -2482,11 +2442,8 @@ impl ToolCallEventStream {
|
||||
"always_allow" => {
|
||||
if let Some(fs) = fs.clone() {
|
||||
cx.update(|cx| {
|
||||
update_settings_file(fs, cx, |settings, _| {
|
||||
settings
|
||||
.agent
|
||||
.get_or_insert_default()
|
||||
.set_always_allow_tool_actions(true);
|
||||
update_settings_file::<AgentSettings>(fs, cx, |settings, _| {
|
||||
settings.set_always_allow_tool_actions(true);
|
||||
});
|
||||
})?;
|
||||
}
|
||||
@@ -2627,21 +2584,17 @@ impl From<UserMessageContent> for acp::ContentBlock {
|
||||
UserMessageContent::Text(text) => acp::ContentBlock::Text(acp::TextContent {
|
||||
text,
|
||||
annotations: None,
|
||||
meta: None,
|
||||
}),
|
||||
UserMessageContent::Image(image) => acp::ContentBlock::Image(acp::ImageContent {
|
||||
data: image.source.to_string(),
|
||||
mime_type: "image/png".to_string(),
|
||||
meta: None,
|
||||
annotations: None,
|
||||
uri: None,
|
||||
}),
|
||||
UserMessageContent::Mention { uri, content } => {
|
||||
acp::ContentBlock::Resource(acp::EmbeddedResource {
|
||||
meta: None,
|
||||
resource: acp::EmbeddedResourceResource::TextResourceContents(
|
||||
acp::TextResourceContents {
|
||||
meta: None,
|
||||
mime_type: None,
|
||||
text: content,
|
||||
uri: uri.to_uri().to_string(),
|
||||
|
||||
@@ -145,7 +145,7 @@ impl AnyAgentTool for ContextServerTool {
|
||||
ToolKind::Other
|
||||
}
|
||||
|
||||
fn initial_title(&self, _input: serde_json::Value, _cx: &mut App) -> SharedString {
|
||||
fn initial_title(&self, _input: serde_json::Value) -> SharedString {
|
||||
format!("Run MCP tool `{}`", self.tool.name).into()
|
||||
}
|
||||
|
||||
@@ -169,18 +169,15 @@ impl AnyAgentTool for ContextServerTool {
|
||||
fn run(
|
||||
self: Arc<Self>,
|
||||
input: serde_json::Value,
|
||||
event_stream: ToolCallEventStream,
|
||||
_event_stream: ToolCallEventStream,
|
||||
cx: &mut App,
|
||||
) -> Task<Result<AgentToolOutput>> {
|
||||
let Some(server) = self.store.read(cx).get_running_server(&self.server_id) else {
|
||||
return Task::ready(Err(anyhow!("Context server not found")));
|
||||
};
|
||||
let tool_name = self.tool.name.clone();
|
||||
let authorize = event_stream.authorize(self.initial_title(input.clone(), cx), cx);
|
||||
|
||||
cx.spawn(async move |_cx| {
|
||||
authorize.await?;
|
||||
|
||||
let Some(protocol) = server.client() else {
|
||||
bail!("Context server not initialized");
|
||||
};
|
||||
|
||||
@@ -9,14 +9,14 @@ use std::sync::Arc;
|
||||
use util::markdown::MarkdownInlineCode;
|
||||
|
||||
/// Copies a file or directory in the project, and returns confirmation that the copy succeeded.
|
||||
/// Directory contents will be copied recursively.
|
||||
/// Directory contents will be copied recursively (like `cp -r`).
|
||||
///
|
||||
/// This tool should be used when it's desirable to create a copy of a file or directory without modifying the original.
|
||||
/// It's much more efficient than doing this by separately reading and then writing the file or directory's contents, so this tool should be preferred over that approach whenever copying is the goal.
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct CopyPathToolInput {
|
||||
/// The source path of the file or directory to copy.
|
||||
/// If a directory is specified, its contents will be copied recursively.
|
||||
/// If a directory is specified, its contents will be copied recursively (like `cp -r`).
|
||||
///
|
||||
/// <example>
|
||||
/// If the project has the following files:
|
||||
@@ -58,11 +58,7 @@ impl AgentTool for CopyPathTool {
|
||||
ToolKind::Move
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> ui::SharedString {
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> ui::SharedString {
|
||||
if let Ok(input) = input {
|
||||
let src = MarkdownInlineCode(&input.source_path);
|
||||
let dest = MarkdownInlineCode(&input.destination_path);
|
||||
@@ -84,7 +80,9 @@ impl AgentTool for CopyPathTool {
|
||||
.and_then(|project_path| project.entry_for_path(&project_path, cx))
|
||||
{
|
||||
Some(entity) => match project.find_project_path(&input.destination_path, cx) {
|
||||
Some(project_path) => project.copy_entry(entity.id, project_path, cx),
|
||||
Some(project_path) => {
|
||||
project.copy_entry(entity.id, None, project_path.path, cx)
|
||||
}
|
||||
None => Task::ready(Err(anyhow!(
|
||||
"Destination path {} was outside the project.",
|
||||
input.destination_path
|
||||
|
||||
@@ -11,7 +11,7 @@ use crate::{AgentTool, ToolCallEventStream};
|
||||
|
||||
/// Creates a new directory at the specified path within the project. Returns confirmation that the directory was created.
|
||||
///
|
||||
/// This tool creates a directory and all necessary parent directories. It should be used whenever you need to create new directories within the project.
|
||||
/// This tool creates a directory and all necessary parent directories (similar to `mkdir -p`). It should be used whenever you need to create new directories within the project.
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct CreateDirectoryToolInput {
|
||||
/// The path of the new directory.
|
||||
@@ -49,11 +49,7 @@ impl AgentTool for CreateDirectoryTool {
|
||||
ToolKind::Read
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
if let Ok(input) = input {
|
||||
format!("Create directory {}", MarkdownInlineCode(&input.path)).into()
|
||||
} else {
|
||||
|
||||
@@ -52,11 +52,7 @@ impl AgentTool for DeletePathTool {
|
||||
ToolKind::Delete
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
if let Ok(input) = input {
|
||||
format!("Delete “`{}`”", input.path).into()
|
||||
} else {
|
||||
|
||||
@@ -6,7 +6,7 @@ use language::{DiagnosticSeverity, OffsetRangeExt};
|
||||
use project::Project;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{fmt::Write, sync::Arc};
|
||||
use std::{fmt::Write, path::Path, sync::Arc};
|
||||
use ui::SharedString;
|
||||
use util::markdown::MarkdownInlineCode;
|
||||
|
||||
@@ -71,11 +71,7 @@ impl AgentTool for DiagnosticsTool {
|
||||
acp::ToolKind::Read
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
if let Some(path) = input.ok().and_then(|input| match input.path {
|
||||
Some(path) if !path.is_empty() => Some(path),
|
||||
_ => None,
|
||||
@@ -147,7 +143,9 @@ impl AgentTool for DiagnosticsTool {
|
||||
has_diagnostics = true;
|
||||
output.push_str(&format!(
|
||||
"{}: {} error(s), {} warning(s)\n",
|
||||
worktree.read(cx).absolutize(&project_path.path).display(),
|
||||
Path::new(worktree.read(cx).root_name())
|
||||
.join(project_path.path)
|
||||
.display(),
|
||||
summary.error_count,
|
||||
summary.warning_count
|
||||
));
|
||||
|
||||
@@ -17,12 +17,10 @@ use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use settings::Settings;
|
||||
use smol::stream::StreamExt as _;
|
||||
use std::ffi::OsStr;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use ui::SharedString;
|
||||
use util::ResultExt;
|
||||
use util::rel_path::RelPath;
|
||||
|
||||
const DEFAULT_UI_TEXT: &str = "Editing file";
|
||||
|
||||
@@ -85,7 +83,6 @@ struct EditFileToolPartialInput {
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
#[schemars(inline)]
|
||||
pub enum EditFileMode {
|
||||
Edit,
|
||||
Create,
|
||||
@@ -122,17 +119,11 @@ impl From<EditFileToolOutput> for LanguageModelToolResultContent {
|
||||
pub struct EditFileTool {
|
||||
thread: WeakEntity<Thread>,
|
||||
language_registry: Arc<LanguageRegistry>,
|
||||
project: Entity<Project>,
|
||||
}
|
||||
|
||||
impl EditFileTool {
|
||||
pub fn new(
|
||||
project: Entity<Project>,
|
||||
thread: WeakEntity<Thread>,
|
||||
language_registry: Arc<LanguageRegistry>,
|
||||
) -> Self {
|
||||
pub fn new(thread: WeakEntity<Thread>, language_registry: Arc<LanguageRegistry>) -> Self {
|
||||
Self {
|
||||
project,
|
||||
thread,
|
||||
language_registry,
|
||||
}
|
||||
@@ -150,11 +141,12 @@ impl EditFileTool {
|
||||
|
||||
// If any path component matches the local settings folder, then this could affect
|
||||
// the editor in ways beyond the project source, so prompt.
|
||||
let local_settings_folder = paths::local_settings_folder_name();
|
||||
let local_settings_folder = paths::local_settings_folder_relative_path();
|
||||
let path = Path::new(&input.path);
|
||||
if path.components().any(|component| {
|
||||
component.as_os_str() == <_ as AsRef<OsStr>>::as_ref(&local_settings_folder)
|
||||
}) {
|
||||
if path
|
||||
.components()
|
||||
.any(|component| component.as_os_str() == local_settings_folder.as_os_str())
|
||||
{
|
||||
return event_stream.authorize(
|
||||
format!("{} (local settings)", input.display_description),
|
||||
cx,
|
||||
@@ -163,7 +155,6 @@ impl EditFileTool {
|
||||
|
||||
// It's also possible that the global config dir is configured to be inside the project,
|
||||
// so check for that edge case too.
|
||||
// TODO this is broken when remoting
|
||||
if let Ok(canonical_path) = std::fs::canonicalize(&input.path)
|
||||
&& canonical_path.starts_with(paths::config_dir())
|
||||
{
|
||||
@@ -203,46 +194,22 @@ impl AgentTool for EditFileTool {
|
||||
acp::ToolKind::Edit
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
match input {
|
||||
Ok(input) => self
|
||||
.project
|
||||
.read(cx)
|
||||
.find_project_path(&input.path, cx)
|
||||
.and_then(|project_path| {
|
||||
self.project
|
||||
.read(cx)
|
||||
.short_full_path_for_project_path(&project_path, cx)
|
||||
})
|
||||
.unwrap_or(input.path.to_string_lossy().into_owned())
|
||||
.into(),
|
||||
Ok(input) => input.display_description.into(),
|
||||
Err(raw_input) => {
|
||||
if let Some(input) =
|
||||
serde_json::from_value::<EditFileToolPartialInput>(raw_input).ok()
|
||||
{
|
||||
let path = input.path.trim();
|
||||
if !path.is_empty() {
|
||||
return self
|
||||
.project
|
||||
.read(cx)
|
||||
.find_project_path(&input.path, cx)
|
||||
.and_then(|project_path| {
|
||||
self.project
|
||||
.read(cx)
|
||||
.short_full_path_for_project_path(&project_path, cx)
|
||||
})
|
||||
.unwrap_or(input.path)
|
||||
.into();
|
||||
}
|
||||
|
||||
let description = input.display_description.trim();
|
||||
if !description.is_empty() {
|
||||
return description.to_string().into();
|
||||
}
|
||||
|
||||
let path = input.path.trim().to_string();
|
||||
if !path.is_empty() {
|
||||
return path.into();
|
||||
}
|
||||
}
|
||||
|
||||
DEFAULT_UI_TEXT.into()
|
||||
@@ -272,7 +239,6 @@ impl AgentTool for EditFileTool {
|
||||
locations: Some(vec![acp::ToolCallLocation {
|
||||
path: abs_path,
|
||||
line: None,
|
||||
meta: None,
|
||||
}]),
|
||||
..Default::default()
|
||||
});
|
||||
@@ -352,7 +318,7 @@ impl AgentTool for EditFileTool {
|
||||
}).ok();
|
||||
if let Some(abs_path) = abs_path.clone() {
|
||||
event_stream.update_fields(ToolCallUpdateFields {
|
||||
locations: Some(vec![ToolCallLocation { path: abs_path, line, meta: None }]),
|
||||
locations: Some(vec![ToolCallLocation { path: abs_path, line }]),
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
@@ -476,7 +442,7 @@ impl AgentTool for EditFileTool {
|
||||
) -> Result<()> {
|
||||
event_stream.update_diff(cx.new(|cx| {
|
||||
Diff::finalized(
|
||||
output.input_path.to_string_lossy().into_owned(),
|
||||
output.input_path,
|
||||
Some(output.old_text.to_string()),
|
||||
output.new_text,
|
||||
self.language_registry.clone(),
|
||||
@@ -540,12 +506,10 @@ fn resolve_path(
|
||||
let file_name = input
|
||||
.path
|
||||
.file_name()
|
||||
.and_then(|file_name| file_name.to_str())
|
||||
.and_then(|file_name| RelPath::unix(file_name).ok())
|
||||
.context("Can't create file: invalid filename")?;
|
||||
|
||||
let new_file_path = parent_project_path.map(|parent| ProjectPath {
|
||||
path: parent.path.join(file_name),
|
||||
path: Arc::from(parent.path.join(file_name)),
|
||||
..parent
|
||||
});
|
||||
|
||||
@@ -565,7 +529,7 @@ mod tests {
|
||||
use prompt_store::ProjectContext;
|
||||
use serde_json::json;
|
||||
use settings::SettingsStore;
|
||||
use util::{path, rel_path::rel_path};
|
||||
use util::path;
|
||||
|
||||
#[gpui::test]
|
||||
async fn test_edit_nonexistent_file(cx: &mut TestAppContext) {
|
||||
@@ -580,7 +544,7 @@ mod tests {
|
||||
let model = Arc::new(FakeLanguageModel::default());
|
||||
let thread = cx.new(|cx| {
|
||||
Thread::new(
|
||||
project.clone(),
|
||||
project,
|
||||
cx.new(|_cx| ProjectContext::default()),
|
||||
context_server_registry,
|
||||
Templates::new(),
|
||||
@@ -595,12 +559,11 @@ mod tests {
|
||||
path: "root/nonexistent_file.txt".into(),
|
||||
mode: EditFileMode::Edit,
|
||||
};
|
||||
Arc::new(EditFileTool::new(
|
||||
project,
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
))
|
||||
.run(input, ToolCallEventStream::test().0, cx)
|
||||
Arc::new(EditFileTool::new(thread.downgrade(), language_registry)).run(
|
||||
input,
|
||||
ToolCallEventStream::test().0,
|
||||
cx,
|
||||
)
|
||||
})
|
||||
.await;
|
||||
assert_eq!(
|
||||
@@ -614,13 +577,13 @@ mod tests {
|
||||
let mode = &EditFileMode::Create;
|
||||
|
||||
let result = test_resolve_path(mode, "root/new.txt", cx);
|
||||
assert_resolved_path_eq(result.await, rel_path("new.txt"));
|
||||
assert_resolved_path_eq(result.await, "new.txt");
|
||||
|
||||
let result = test_resolve_path(mode, "new.txt", cx);
|
||||
assert_resolved_path_eq(result.await, rel_path("new.txt"));
|
||||
assert_resolved_path_eq(result.await, "new.txt");
|
||||
|
||||
let result = test_resolve_path(mode, "dir/new.txt", cx);
|
||||
assert_resolved_path_eq(result.await, rel_path("dir/new.txt"));
|
||||
assert_resolved_path_eq(result.await, "dir/new.txt");
|
||||
|
||||
let result = test_resolve_path(mode, "root/dir/subdir/existing.txt", cx);
|
||||
assert_eq!(
|
||||
@@ -642,10 +605,10 @@ mod tests {
|
||||
let path_with_root = "root/dir/subdir/existing.txt";
|
||||
let path_without_root = "dir/subdir/existing.txt";
|
||||
let result = test_resolve_path(mode, path_with_root, cx);
|
||||
assert_resolved_path_eq(result.await, rel_path(path_without_root));
|
||||
assert_resolved_path_eq(result.await, path_without_root);
|
||||
|
||||
let result = test_resolve_path(mode, path_without_root, cx);
|
||||
assert_resolved_path_eq(result.await, rel_path(path_without_root));
|
||||
assert_resolved_path_eq(result.await, path_without_root);
|
||||
|
||||
let result = test_resolve_path(mode, "root/nonexistent.txt", cx);
|
||||
assert_eq!(
|
||||
@@ -690,10 +653,14 @@ mod tests {
|
||||
cx.update(|cx| resolve_path(&input, project, cx))
|
||||
}
|
||||
|
||||
#[track_caller]
|
||||
fn assert_resolved_path_eq(path: anyhow::Result<ProjectPath>, expected: &RelPath) {
|
||||
let actual = path.expect("Should return valid path").path;
|
||||
assert_eq!(actual.as_ref(), expected);
|
||||
fn assert_resolved_path_eq(path: anyhow::Result<ProjectPath>, expected: &str) {
|
||||
let actual = path
|
||||
.expect("Should return valid path")
|
||||
.path
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.replace("\\", "/"); // Naive Windows paths normalization
|
||||
assert_eq!(actual, expected);
|
||||
}
|
||||
|
||||
#[gpui::test]
|
||||
@@ -775,7 +742,7 @@ mod tests {
|
||||
let model = Arc::new(FakeLanguageModel::default());
|
||||
let thread = cx.new(|cx| {
|
||||
Thread::new(
|
||||
project.clone(),
|
||||
project,
|
||||
cx.new(|_cx| ProjectContext::default()),
|
||||
context_server_registry,
|
||||
Templates::new(),
|
||||
@@ -787,11 +754,14 @@ mod tests {
|
||||
// First, test with format_on_save enabled
|
||||
cx.update(|cx| {
|
||||
SettingsStore::update_global(cx, |store, cx| {
|
||||
store.update_user_settings(cx, |settings| {
|
||||
settings.project.all_languages.defaults.format_on_save = Some(FormatOnSave::On);
|
||||
settings.project.all_languages.defaults.formatter =
|
||||
Some(language::language_settings::FormatterList::default());
|
||||
});
|
||||
store.update_user_settings::<language::language_settings::AllLanguageSettings>(
|
||||
cx,
|
||||
|settings| {
|
||||
settings.defaults.format_on_save = Some(FormatOnSave::On);
|
||||
settings.defaults.formatter =
|
||||
Some(language::language_settings::SelectedFormatter::Auto);
|
||||
},
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -804,7 +774,6 @@ mod tests {
|
||||
mode: EditFileMode::Overwrite,
|
||||
};
|
||||
Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry.clone(),
|
||||
))
|
||||
@@ -846,10 +815,12 @@ mod tests {
|
||||
// Next, test with format_on_save disabled
|
||||
cx.update(|cx| {
|
||||
SettingsStore::update_global(cx, |store, cx| {
|
||||
store.update_user_settings(cx, |settings| {
|
||||
settings.project.all_languages.defaults.format_on_save =
|
||||
Some(FormatOnSave::Off);
|
||||
});
|
||||
store.update_user_settings::<language::language_settings::AllLanguageSettings>(
|
||||
cx,
|
||||
|settings| {
|
||||
settings.defaults.format_on_save = Some(FormatOnSave::Off);
|
||||
},
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -861,12 +832,11 @@ mod tests {
|
||||
path: "root/src/main.rs".into(),
|
||||
mode: EditFileMode::Overwrite,
|
||||
};
|
||||
Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
))
|
||||
.run(input, ToolCallEventStream::test().0, cx)
|
||||
Arc::new(EditFileTool::new(thread.downgrade(), language_registry)).run(
|
||||
input,
|
||||
ToolCallEventStream::test().0,
|
||||
cx,
|
||||
)
|
||||
});
|
||||
|
||||
// Stream the unformatted content
|
||||
@@ -914,7 +884,7 @@ mod tests {
|
||||
let model = Arc::new(FakeLanguageModel::default());
|
||||
let thread = cx.new(|cx| {
|
||||
Thread::new(
|
||||
project.clone(),
|
||||
project,
|
||||
cx.new(|_cx| ProjectContext::default()),
|
||||
context_server_registry,
|
||||
Templates::new(),
|
||||
@@ -926,13 +896,12 @@ mod tests {
|
||||
// First, test with remove_trailing_whitespace_on_save enabled
|
||||
cx.update(|cx| {
|
||||
SettingsStore::update_global(cx, |store, cx| {
|
||||
store.update_user_settings(cx, |settings| {
|
||||
settings
|
||||
.project
|
||||
.all_languages
|
||||
.defaults
|
||||
.remove_trailing_whitespace_on_save = Some(true);
|
||||
});
|
||||
store.update_user_settings::<language::language_settings::AllLanguageSettings>(
|
||||
cx,
|
||||
|settings| {
|
||||
settings.defaults.remove_trailing_whitespace_on_save = Some(true);
|
||||
},
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -948,7 +917,6 @@ mod tests {
|
||||
mode: EditFileMode::Overwrite,
|
||||
};
|
||||
Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry.clone(),
|
||||
))
|
||||
@@ -983,13 +951,12 @@ mod tests {
|
||||
// Next, test with remove_trailing_whitespace_on_save disabled
|
||||
cx.update(|cx| {
|
||||
SettingsStore::update_global(cx, |store, cx| {
|
||||
store.update_user_settings(cx, |settings| {
|
||||
settings
|
||||
.project
|
||||
.all_languages
|
||||
.defaults
|
||||
.remove_trailing_whitespace_on_save = Some(false);
|
||||
});
|
||||
store.update_user_settings::<language::language_settings::AllLanguageSettings>(
|
||||
cx,
|
||||
|settings| {
|
||||
settings.defaults.remove_trailing_whitespace_on_save = Some(false);
|
||||
},
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1001,12 +968,11 @@ mod tests {
|
||||
path: "root/src/main.rs".into(),
|
||||
mode: EditFileMode::Overwrite,
|
||||
};
|
||||
Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
))
|
||||
.run(input, ToolCallEventStream::test().0, cx)
|
||||
Arc::new(EditFileTool::new(thread.downgrade(), language_registry)).run(
|
||||
input,
|
||||
ToolCallEventStream::test().0,
|
||||
cx,
|
||||
)
|
||||
});
|
||||
|
||||
// Stream the content with trailing whitespace
|
||||
@@ -1045,7 +1011,7 @@ mod tests {
|
||||
let model = Arc::new(FakeLanguageModel::default());
|
||||
let thread = cx.new(|cx| {
|
||||
Thread::new(
|
||||
project.clone(),
|
||||
project,
|
||||
cx.new(|_cx| ProjectContext::default()),
|
||||
context_server_registry,
|
||||
Templates::new(),
|
||||
@@ -1053,11 +1019,7 @@ mod tests {
|
||||
cx,
|
||||
)
|
||||
});
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), language_registry));
|
||||
fs.insert_tree("/root", json!({})).await;
|
||||
|
||||
// Test 1: Path with .zed component should require confirmation
|
||||
@@ -1185,7 +1147,7 @@ mod tests {
|
||||
let model = Arc::new(FakeLanguageModel::default());
|
||||
let thread = cx.new(|cx| {
|
||||
Thread::new(
|
||||
project.clone(),
|
||||
project,
|
||||
cx.new(|_cx| ProjectContext::default()),
|
||||
context_server_registry,
|
||||
Templates::new(),
|
||||
@@ -1193,11 +1155,7 @@ mod tests {
|
||||
cx,
|
||||
)
|
||||
});
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), language_registry));
|
||||
|
||||
// Test global config paths - these should require confirmation if they exist and are outside the project
|
||||
let test_cases = vec![
|
||||
@@ -1305,11 +1263,7 @@ mod tests {
|
||||
cx,
|
||||
)
|
||||
});
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), language_registry));
|
||||
|
||||
// Test files in different worktrees
|
||||
let test_cases = vec![
|
||||
@@ -1389,11 +1343,7 @@ mod tests {
|
||||
cx,
|
||||
)
|
||||
});
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), language_registry));
|
||||
|
||||
// Test edge cases
|
||||
let test_cases = vec![
|
||||
@@ -1404,8 +1354,8 @@ mod tests {
|
||||
// Parent directory references - find_project_path resolves these
|
||||
(
|
||||
"project/../other",
|
||||
true,
|
||||
"Path with .. that goes outside of root directory",
|
||||
false,
|
||||
"Path with .. is resolved by find_project_path",
|
||||
),
|
||||
(
|
||||
"project/./src/file.rs",
|
||||
@@ -1433,18 +1383,16 @@ mod tests {
|
||||
)
|
||||
});
|
||||
|
||||
cx.run_until_parked();
|
||||
|
||||
if should_confirm {
|
||||
stream_rx.expect_authorization().await;
|
||||
} else {
|
||||
auth.await.unwrap();
|
||||
assert!(
|
||||
stream_rx.try_next().is_err(),
|
||||
"Failed for case: {} - path: {} - expected no confirmation but got one",
|
||||
description,
|
||||
path
|
||||
);
|
||||
auth.await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1478,11 +1426,7 @@ mod tests {
|
||||
cx,
|
||||
)
|
||||
});
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), language_registry));
|
||||
|
||||
// Test different EditFileMode values
|
||||
let modes = vec![
|
||||
@@ -1562,67 +1506,48 @@ mod tests {
|
||||
cx,
|
||||
)
|
||||
});
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project,
|
||||
thread.downgrade(),
|
||||
language_registry,
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), language_registry));
|
||||
|
||||
cx.update(|cx| {
|
||||
// ...
|
||||
assert_eq!(
|
||||
tool.initial_title(
|
||||
Err(json!({
|
||||
"path": "src/main.rs",
|
||||
"display_description": "",
|
||||
"old_string": "old code",
|
||||
"new_string": "new code"
|
||||
})),
|
||||
cx
|
||||
),
|
||||
"src/main.rs"
|
||||
);
|
||||
assert_eq!(
|
||||
tool.initial_title(
|
||||
Err(json!({
|
||||
"path": "",
|
||||
"display_description": "Fix error handling",
|
||||
"old_string": "old code",
|
||||
"new_string": "new code"
|
||||
})),
|
||||
cx
|
||||
),
|
||||
"Fix error handling"
|
||||
);
|
||||
assert_eq!(
|
||||
tool.initial_title(
|
||||
Err(json!({
|
||||
"path": "src/main.rs",
|
||||
"display_description": "Fix error handling",
|
||||
"old_string": "old code",
|
||||
"new_string": "new code"
|
||||
})),
|
||||
cx
|
||||
),
|
||||
"src/main.rs"
|
||||
);
|
||||
assert_eq!(
|
||||
tool.initial_title(
|
||||
Err(json!({
|
||||
"path": "",
|
||||
"display_description": "",
|
||||
"old_string": "old code",
|
||||
"new_string": "new code"
|
||||
})),
|
||||
cx
|
||||
),
|
||||
DEFAULT_UI_TEXT
|
||||
);
|
||||
assert_eq!(
|
||||
tool.initial_title(Err(serde_json::Value::Null), cx),
|
||||
DEFAULT_UI_TEXT
|
||||
);
|
||||
});
|
||||
assert_eq!(
|
||||
tool.initial_title(Err(json!({
|
||||
"path": "src/main.rs",
|
||||
"display_description": "",
|
||||
"old_string": "old code",
|
||||
"new_string": "new code"
|
||||
}))),
|
||||
"src/main.rs"
|
||||
);
|
||||
assert_eq!(
|
||||
tool.initial_title(Err(json!({
|
||||
"path": "",
|
||||
"display_description": "Fix error handling",
|
||||
"old_string": "old code",
|
||||
"new_string": "new code"
|
||||
}))),
|
||||
"Fix error handling"
|
||||
);
|
||||
assert_eq!(
|
||||
tool.initial_title(Err(json!({
|
||||
"path": "src/main.rs",
|
||||
"display_description": "Fix error handling",
|
||||
"old_string": "old code",
|
||||
"new_string": "new code"
|
||||
}))),
|
||||
"Fix error handling"
|
||||
);
|
||||
assert_eq!(
|
||||
tool.initial_title(Err(json!({
|
||||
"path": "",
|
||||
"display_description": "",
|
||||
"old_string": "old code",
|
||||
"new_string": "new code"
|
||||
}))),
|
||||
DEFAULT_UI_TEXT
|
||||
);
|
||||
assert_eq!(
|
||||
tool.initial_title(Err(serde_json::Value::Null)),
|
||||
DEFAULT_UI_TEXT
|
||||
);
|
||||
}
|
||||
|
||||
#[gpui::test]
|
||||
@@ -1649,11 +1574,7 @@ mod tests {
|
||||
|
||||
// Ensure the diff is finalized after the edit completes.
|
||||
{
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
languages.clone(),
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), languages.clone()));
|
||||
let (stream_tx, mut stream_rx) = ToolCallEventStream::test();
|
||||
let edit = cx.update(|cx| {
|
||||
tool.run(
|
||||
@@ -1678,11 +1599,7 @@ mod tests {
|
||||
// Ensure the diff is finalized if an error occurs while editing.
|
||||
{
|
||||
model.forbid_requests();
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
languages.clone(),
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), languages.clone()));
|
||||
let (stream_tx, mut stream_rx) = ToolCallEventStream::test();
|
||||
let edit = cx.update(|cx| {
|
||||
tool.run(
|
||||
@@ -1705,11 +1622,7 @@ mod tests {
|
||||
|
||||
// Ensure the diff is finalized if the tool call gets dropped.
|
||||
{
|
||||
let tool = Arc::new(EditFileTool::new(
|
||||
project.clone(),
|
||||
thread.downgrade(),
|
||||
languages.clone(),
|
||||
));
|
||||
let tool = Arc::new(EditFileTool::new(thread.downgrade(), languages.clone()));
|
||||
let (stream_tx, mut stream_rx) = ToolCallEventStream::test();
|
||||
let edit = cx.update(|cx| {
|
||||
tool.run(
|
||||
|
||||
@@ -126,11 +126,7 @@ impl AgentTool for FetchTool {
|
||||
acp::ToolKind::Fetch
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
match input {
|
||||
Ok(input) => format!("Fetch {}", MarkdownEscaped(&input.url)).into(),
|
||||
Err(_) => "Fetch URL".into(),
|
||||
|
||||
@@ -93,11 +93,7 @@ impl AgentTool for FindPathTool {
|
||||
acp::ToolKind::Search
|
||||
}
|
||||
|
||||
fn initial_title(
|
||||
&self,
|
||||
input: Result<Self::Input, serde_json::Value>,
|
||||
_cx: &mut App,
|
||||
) -> SharedString {
|
||||
fn initial_title(&self, input: Result<Self::Input, serde_json::Value>) -> SharedString {
|
||||
let mut title = "Find paths".to_string();
|
||||
if let Ok(input) = input {
|
||||
title.push_str(&format!(" matching “`{}`”", input.glob));
|
||||
@@ -138,7 +134,6 @@ impl AgentTool for FindPathTool {
|
||||
mime_type: None,
|
||||
size: None,
|
||||
title: None,
|
||||
meta: None,
|
||||
}),
|
||||
})
|
||||
.collect(),
|
||||
@@ -156,14 +151,10 @@ impl AgentTool for FindPathTool {
|
||||
}
|
||||
|
||||
fn search_paths(glob: &str, project: Entity<Project>, cx: &mut App) -> Task<Result<Vec<PathBuf>>> {
|
||||
let path_style = project.read(cx).path_style(cx);
|
||||
let path_matcher = match PathMatcher::new(
|
||||
[
|
||||
// Sometimes models try to search for "". In this case, return all paths in the project.
|
||||
if glob.is_empty() { "*" } else { glob },
|
||||
],
|
||||
path_style,
|
||||
) {
|
||||
let path_matcher = match PathMatcher::new([
|
||||
// Sometimes models try to search for "". In this case, return all paths in the project.
|
||||
if glob.is_empty() { "*" } else { glob },
|
||||
]) {
|
||||
Ok(matcher) => matcher,
|
||||
Err(err) => return Task::ready(Err(anyhow!("Invalid glob: {err}"))),
|
||||
};
|
||||
@@ -177,8 +168,9 @@ fn search_paths(glob: &str, project: Entity<Project>, cx: &mut App) -> Task<Resu
|
||||
let mut results = Vec::new();
|
||||
for snapshot in snapshots {
|
||||
for entry in snapshot.entries(false, 0) {
|
||||
if path_matcher.is_match(snapshot.root_name().join(&entry.path).as_std_path()) {
|
||||
results.push(snapshot.absolutize(&entry.path));
|
||||
let root_name = PathBuf::from(snapshot.root_name());
|
||||
if path_matcher.is_match(root_name.join(&entry.path)) {
|
||||
results.push(snapshot.abs_path().join(entry.path.as_ref()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||