Compare commits

...

6 Commits

Author | SHA1 | Message | Date
Kyle Kelley
473bc89d3a Allow dynamic use of RunningKernels as trait objects
- Modify NativeRunningKernel and RemoteRunningKernel to return
  Box<dyn RunningKernel> instead of concrete types
- Update Session to handle RunningKernels as trait objects
- Implement trait methods for both native and remote kernels

This also has remote kernel specs hooked up in a hardcoded way for now.
2024-11-20 13:18:36 -08:00
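
A minimal sketch of the trait-object pattern this commit describes, using simplified stand-ins rather than the real kernel types; the actual RunningKernel trait carries more methods (request_tx access, shutdown, kernel info) than shown here.

    // Illustrative stand-ins only; the real trait lives in the repl crate.
    trait RunningKernel {
        fn kind(&self) -> &'static str;
    }

    struct NativeRunningKernel;
    struct RemoteRunningKernel;

    impl RunningKernel for NativeRunningKernel {
        fn kind(&self) -> &'static str {
            "native"
        }
    }

    impl RunningKernel for RemoteRunningKernel {
        fn kind(&self) -> &'static str {
            "remote"
        }
    }

    // Either concrete kernel is erased to the same boxed trait object.
    fn launch(native: bool) -> Box<dyn RunningKernel> {
        if native {
            Box::new(NativeRunningKernel) as Box<dyn RunningKernel>
        } else {
            Box::new(RemoteRunningKernel) as Box<dyn RunningKernel>
        }
    }

    fn main() {
        let kernel = launch(true);
        println!("running a {} kernel", kernel.kind());
    }

Returning the boxed trait object from both constructors is what lets Session drop its per-kernel-type handling further down in this diff.
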
Kyle Kelley
118e7a66b3 Pass View<Session> in to remote kernel and launch via gpui::Task 2024-11-20 13:18:36 -08:00
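
Sketched without gpui, the shape of that change: the constructor returns immediately with a task/future that later resolves to the boxed kernel. SessionHandle and launch_remote are illustrative stand-ins for View<Session> and RemoteRunningKernel::new, futures::executor::block_on stands in for gpui's executor, and the anyhow and futures crates are assumed (both already appear in this diff).

    use std::future::Future;

    trait RunningKernel {}
    struct RemoteRunningKernel;
    impl RunningKernel for RemoteRunningKernel {}

    #[derive(Clone)]
    struct SessionHandle; // stand-in for gpui's View<Session>

    // Returns right away; the connection work runs when the future is polled,
    // mirroring the Task<Result<Box<dyn RunningKernel>>> return type below.
    fn launch_remote(
        _session: SessionHandle,
    ) -> impl Future<Output = anyhow::Result<Box<dyn RunningKernel>>> {
        async move {
            // ... connect to the remote kernel here ...
            Ok(Box::new(RemoteRunningKernel) as Box<dyn RunningKernel>)
        }
    }

    fn main() {
        let kernel = futures::executor::block_on(launch_remote(SessionHandle));
        assert!(kernel.is_ok());
    }
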
Kyle Kelley
6469500330 Implement native kernel process monitoring
- Add process status monitoring for native kernels
- Remove unnecessary messaging and process status tasks from Session
- Refactor kernel error handling to be more centralized
- Update NativeRunningKernel to handle its own process status
- Simplify Session code by moving some responsibilities to NativeRunningKernel
2024-11-20 13:18:36 -08:00
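
A rough sketch of that monitoring flow, with std::process and a plain thread standing in for the smol child handle and gpui task in the diff below; the kernel command is a placeholder, and the error branches are where the real code calls session.kernel_errored(..).

    use std::process::{Command, Stdio};
    use std::thread;

    fn main() -> std::io::Result<()> {
        // Placeholder command; the real code launches the resolved kernelspec.
        let mut child = Command::new("python3")
            .args(["-c", "import sys; sys.exit(1)"])
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;

        // The kernel now owns its own status watcher instead of the Session.
        let monitor = thread::spawn(move || match child.wait() {
            Ok(status) if status.success() => {
                println!("kernel process exited successfully");
            }
            Ok(status) => {
                // The real code forwards this message to session.kernel_errored(..).
                eprintln!("kernel process exited with status: {:?}", status);
            }
            Err(err) => {
                eprintln!("kernel process exited with error: {:?}", err);
            }
        });

        monitor.join().unwrap();
        Ok(())
    }
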
Kyle Kelley
694231afd1 clean up priors 2024-11-20 13:18:36 -08:00
Kyle Kelley
566c93a0f5 collect stdout and stderr in the native kernel launch 2024-11-20 13:18:36 -08:00
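
A condensed sketch of that collection, again using std threads and std::io::BufReader in place of the detached async tasks and futures::io::BufReader in the diff; the command is a placeholder.

    use std::io::{BufRead, BufReader};
    use std::process::{Command, Stdio};
    use std::thread;

    fn main() -> std::io::Result<()> {
        let mut child = Command::new("python3") // placeholder kernel command
            .args(["-c", "print('hello from the kernel')"])
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;

        let stdout = child.stdout.take();
        let stdout_task = thread::spawn(move || {
            if let Some(stdout) = stdout {
                for line in BufReader::new(stdout).lines().map_while(Result::ok) {
                    println!("kernel: {line}"); // log::info! in the real code
                }
            }
        });

        let stderr = child.stderr.take();
        let stderr_task = thread::spawn(move || {
            if let Some(stderr) = stderr {
                for line in BufReader::new(stderr).lines().map_while(Result::ok) {
                    eprintln!("kernel: {line}"); // log::error! in the real code
                }
            }
        });

        stdout_task.join().unwrap();
        stderr_task.join().unwrap();
        child.wait()?;
        Ok(())
    }
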
Kyle Kelley
1430718d1a start off just using the request_tx setup for consistency sake 2024-11-20 13:18:36 -08:00
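
The request_tx setup in miniature: callers push outgoing messages into a bounded mpsc sender and a single routing task drains the receiver and writes them to the kernel. Message and send_to_kernel are stand-ins for JupyterMessage and the shell-socket write; the futures crate is assumed, as in the diff.

    use futures::{channel::mpsc, SinkExt as _, StreamExt as _};

    #[derive(Debug)]
    struct Message(&'static str); // stand-in for JupyterMessage

    async fn send_to_kernel(message: Message) {
        println!("-> kernel: {:?}", message); // really a socket write
    }

    fn main() {
        futures::executor::block_on(async {
            let (mut request_tx, mut request_rx) = mpsc::channel::<Message>(100);

            // Single routing task: every sender in the session funnels through here.
            let routing_task = async move {
                while let Some(message) = request_rx.next().await {
                    send_to_kernel(message).await;
                }
            };

            request_tx.send(Message("kernel_info_request")).await.ok();
            drop(request_tx); // close the channel so the routing task finishes

            routing_task.await;
        });
    }
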
3 changed files with 222 additions and 195 deletions

View File

@@ -1,10 +1,11 @@
use anyhow::{Context as _, Result};
use futures::{
channel::mpsc::{self},
io::BufReader,
stream::{SelectAll, StreamExt},
SinkExt as _,
AsyncBufReadExt as _, SinkExt as _,
};
use gpui::{AppContext, EntityId, Task};
use gpui::{EntityId, Task, View, WindowContext};
use jupyter_protocol::{JupyterMessage, JupyterMessageContent, KernelInfoReply};
use project::Fs;
use runtimelib::{dirs, ConnectionInfo, ExecutionState, JupyterKernelspec};
@@ -18,7 +19,9 @@ use std::{
};
use uuid::Uuid;
use super::{JupyterMessageChannel, RunningKernel};
use crate::Session;
use super::RunningKernel;
#[derive(Debug, Clone)]
pub struct LocalKernelSpecification {
@@ -89,10 +92,10 @@ async fn peek_ports(ip: IpAddr) -> Result<[u16; 5]> {
pub struct NativeRunningKernel {
pub process: smol::process::Child,
_shell_task: Task<Result<()>>,
_iopub_task: Task<Result<()>>,
_control_task: Task<Result<()>>,
_routing_task: Task<Result<()>>,
connection_path: PathBuf,
_process_status_task: Option<Task<()>>,
pub working_directory: PathBuf,
pub request_tx: mpsc::Sender<JupyterMessage>,
pub execution_state: ExecutionState,
@@ -113,8 +116,10 @@ impl NativeRunningKernel {
entity_id: EntityId,
working_directory: PathBuf,
fs: Arc<dyn Fs>,
cx: &mut AppContext,
) -> Task<Result<(Self, JupyterMessageChannel)>> {
// todo: convert to weak view
session: View<Session>,
cx: &mut WindowContext,
) -> Task<Result<Box<dyn RunningKernel>>> {
cx.spawn(|cx| async move {
let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
let ports = peek_ports(ip).await?;
@@ -142,7 +147,7 @@ impl NativeRunningKernel {
let mut cmd = kernel_specification.command(&connection_path)?;
let process = cmd
let mut process = cmd
.current_dir(&working_directory)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
@@ -161,8 +166,6 @@ impl NativeRunningKernel {
let mut control_socket =
runtimelib::create_client_control_connection(&connection_info, &session_id).await?;
let (mut iopub, iosub) = futures::channel::mpsc::channel(100);
let (request_tx, mut request_rx) =
futures::channel::mpsc::channel::<JupyterMessage>(100);
@@ -170,18 +173,41 @@ impl NativeRunningKernel {
let (mut shell_reply_tx, shell_reply_rx) = futures::channel::mpsc::channel(100);
let mut messages_rx = SelectAll::new();
messages_rx.push(iosub);
messages_rx.push(control_reply_rx);
messages_rx.push(shell_reply_rx);
let iopub_task = cx.background_executor().spawn({
async move {
while let Ok(message) = iopub_socket.read().await {
iopub.send(message).await?;
cx.spawn({
let session = session.clone();
|mut cx| async move {
while let Some(message) = messages_rx.next().await {
session
.update(&mut cx, |session, cx| {
session.route(&message, cx);
})
.ok();
}
anyhow::Ok(())
}
});
})
.detach();
// iopub task
cx.spawn({
let session = session.clone();
|mut cx| async move {
while let Ok(message) = iopub_socket.read().await {
session
.update(&mut cx, |session, cx| {
session.route(&message, cx);
})
.ok();
}
anyhow::Ok(())
}
})
.detach();
let (mut control_request_tx, mut control_request_rx) =
futures::channel::mpsc::channel(100);
@@ -227,21 +253,74 @@ impl NativeRunningKernel {
}
});
anyhow::Ok((
Self {
process,
request_tx,
working_directory,
_shell_task: shell_task,
_iopub_task: iopub_task,
_control_task: control_task,
_routing_task: routing_task,
connection_path,
execution_state: ExecutionState::Idle,
kernel_info: None,
},
messages_rx,
))
let stderr = process.stderr.take();
cx.spawn(|mut _cx| async move {
if stderr.is_none() {
return;
}
let reader = BufReader::new(stderr.unwrap());
let mut lines = reader.lines();
while let Some(Ok(line)) = lines.next().await {
log::error!("kernel: {}", line);
}
})
.detach();
let stdout = process.stdout.take();
cx.spawn(|mut _cx| async move {
if stdout.is_none() {
return;
}
let reader = BufReader::new(stdout.unwrap());
let mut lines = reader.lines();
while let Some(Ok(line)) = lines.next().await {
log::info!("kernel: {}", line);
}
})
.detach();
let status = process.status();
let process_status_task = cx.spawn(|mut cx| async move {
let error_message = match status.await {
Ok(status) => {
if status.success() {
log::info!("kernel process exited successfully");
return;
}
format!("kernel process exited with status: {:?}", status)
}
Err(err) => {
format!("kernel process exited with error: {:?}", err)
}
};
log::error!("{}", error_message);
session
.update(&mut cx, |session, cx| {
session.kernel_errored(error_message, cx);
cx.notify();
})
.ok();
});
anyhow::Ok(Box::new(Self {
process,
request_tx,
working_directory,
_process_status_task: Some(process_status_task),
_shell_task: shell_task,
_control_task: control_task,
_routing_task: routing_task,
connection_path,
execution_state: ExecutionState::Idle,
kernel_info: None,
}) as Box<dyn RunningKernel>)
})
}
}

View File

@@ -1,11 +1,13 @@
use futures::{channel::mpsc, StreamExt as _};
use gpui::AppContext;
use futures::{channel::mpsc, SinkExt as _, StreamExt as _};
use gpui::{Task, View, WindowContext};
use jupyter_protocol::{ExecutionState, JupyterMessage, KernelInfoReply};
// todo(kyle): figure out if this needs to be different
use runtimelib::JupyterKernelspec;
use crate::Session;
use super::RunningKernel;
use jupyter_websocket_client::RemoteServer;
use anyhow::Result;
use jupyter_websocket_client::{JupyterWebSocketReader, JupyterWebSocketWriter, RemoteServer};
use std::fmt::Debug;
#[derive(Debug, Clone)]
@@ -26,6 +28,8 @@ impl Eq for RemoteKernelSpecification {}
pub struct RemoteRunningKernel {
remote_server: RemoteServer,
_receiving_task: Task<Result<()>>,
_routing_task: Task<Result<()>>,
pub working_directory: std::path::PathBuf,
pub request_tx: mpsc::Sender<JupyterMessage>,
pub execution_state: ExecutionState,
@@ -33,48 +37,70 @@ pub struct RemoteRunningKernel {
}
impl RemoteRunningKernel {
pub async fn new(
pub fn new(
kernelspec: RemoteKernelSpecification,
working_directory: std::path::PathBuf,
request_tx: mpsc::Sender<JupyterMessage>,
_cx: &mut AppContext,
) -> anyhow::Result<(
Self,
(), // Stream<Item=JupyterMessage>
)> {
session: View<Session>,
cx: &mut WindowContext,
) -> Task<Result<Box<dyn RunningKernel>>> {
let remote_server = RemoteServer {
base_url: kernelspec.url,
token: kernelspec.token,
};
cx.spawn(|cx| async move {
// todo: launch a kernel to get a kernel ID
let kernel_id = "d77b481b-2f14-4528-af0a-6c4c9ca98085";
// todo: launch a kernel to get a kernel ID
let kernel_id = "not-implemented";
let kernel_socket = remote_server.connect_to_kernel(kernel_id).await?;
let kernel_socket = remote_server.connect_to_kernel(kernel_id).await?;
let (mut w, mut r): (JupyterWebSocketWriter, JupyterWebSocketReader) =
kernel_socket.split();
let (mut _w, mut _r) = kernel_socket.split();
let (request_tx, mut request_rx) =
futures::channel::mpsc::channel::<JupyterMessage>(100);
let (_messages_tx, _messages_rx) = mpsc::channel::<JupyterMessage>(100);
let routing_task = cx.background_executor().spawn({
async move {
while let Some(message) = request_rx.next().await {
w.send(message).await.ok();
}
Ok(())
}
});
// let routing_task = cx.background_executor().spawn({
// async move {
// while let Some(message) = request_rx.next().await {
// w.send(message).await;
// }
// }
// });
// let messages_rx = r.into();
let receiving_task = cx.spawn({
let session = session.clone();
anyhow::Ok((
Self {
|mut cx| async move {
while let Some(message) = r.next().await {
match message {
Ok(message) => {
session
.update(&mut cx, |session, cx| {
session.route(&message, cx);
})
.ok();
}
Err(e) => {
log::error!("Error receiving message: {:?}", e);
}
}
}
Ok(())
}
});
anyhow::Ok(Box::new(Self {
_routing_task: routing_task,
_receiving_task: receiving_task,
remote_server,
working_directory,
request_tx,
// todo(kyle): pull this from the kernel API to start with
execution_state: ExecutionState::Idle,
kernel_info: None,
},
(),
))
}) as Box<dyn RunningKernel>)
})
}
}

View File

@@ -1,4 +1,5 @@
use crate::components::KernelListItem;
use crate::kernels::{RemoteKernelSpecification, RemoteRunningKernel};
use crate::setup_editor_session_actions;
use crate::{
kernels::{Kernel, KernelSpecification, NativeRunningKernel},
@@ -15,16 +16,15 @@ use editor::{
scroll::Autoscroll,
Anchor, AnchorRangeExt as _, Editor, MultiBuffer, ToPoint,
};
use futures::io::BufReader;
use futures::{AsyncBufReadExt as _, FutureExt as _, StreamExt as _};
use futures::FutureExt as _;
use gpui::{
div, prelude::*, EventEmitter, Model, Render, Subscription, Task, View, ViewContext, WeakView,
};
use language::Point;
use project::Fs;
use runtimelib::{
ExecuteRequest, ExecutionState, InterruptRequest, JupyterMessage, JupyterMessageContent,
ShutdownRequest,
ExecuteRequest, ExecutionState, InterruptRequest, JupyterKernelspec, JupyterMessage,
JupyterMessageContent, ShutdownRequest,
};
use std::{env::temp_dir, ops::Range, sync::Arc, time::Duration};
use theme::ActiveTheme;
@@ -35,8 +35,6 @@ pub struct Session {
editor: WeakView<Editor>,
pub kernel: Kernel,
blocks: HashMap<String, EditorBlock>,
messaging_task: Option<Task<()>>,
process_status_task: Option<Task<()>>,
pub kernel_specification: KernelSpecification,
telemetry: Arc<Telemetry>,
_buffer_subscription: Subscription,
@@ -215,12 +213,31 @@ impl Session {
})
.ok();
// Creating a baked in kernel specification to see if remoting is working
let kernel_specification = KernelSpecification::Remote(RemoteKernelSpecification {
name: "todo".to_string(),
url: "http://localhost:8888/".to_string(),
token: std::env::var("JUPYTER_TOKEN").expect("JUPYTER_TOKEN not set"),
kernelspec: JupyterKernelspec {
argv: vec![
"python".to_string(),
"-m".to_string(),
"ipykernel_launcher".to_string(),
"-f".to_string(),
"{connection_file}".to_string(),
],
env: None,
display_name: "Python 3 (ipykernel)".to_string(),
language: "python".to_string(),
interrupt_mode: Some("signal".to_string()),
metadata: None,
},
});
let mut session = Self {
fs,
editor,
kernel: Kernel::StartingKernel(Task::ready(()).shared()),
messaging_task: None,
process_status_task: None,
blocks: HashMap::default(),
kernel_specification,
_buffer_subscription: subscription,
@@ -246,6 +263,8 @@ impl Session {
cx.entity_id().to_string(),
);
let session_view = cx.view().clone();
let kernel = match self.kernel_specification.clone() {
KernelSpecification::Jupyter(kernel_specification)
| KernelSpecification::PythonEnv(kernel_specification) => NativeRunningKernel::new(
@@ -253,11 +272,15 @@ impl Session {
entity_id,
working_directory,
self.fs.clone(),
session_view,
cx,
),
KernelSpecification::Remote(remote_kernel_specification) => RemoteRunningKernel::new(
remote_kernel_specification,
working_directory,
session_view,
cx,
),
KernelSpecification::Remote(_remote_kernel_specification) => {
unimplemented!()
}
};
let pending_kernel = cx
@@ -265,119 +288,15 @@ impl Session {
let kernel = kernel.await;
match kernel {
Ok((mut kernel, mut messages_rx)) => {
Ok(kernel) => {
this.update(&mut cx, |session, cx| {
let stderr = kernel.process.stderr.take();
cx.spawn(|_session, mut _cx| async move {
if stderr.is_none() {
return;
}
let reader = BufReader::new(stderr.unwrap());
let mut lines = reader.lines();
while let Some(Ok(line)) = lines.next().await {
// todo!(): Log stdout and stderr to something the session can show
log::error!("kernel: {}", line);
}
})
.detach();
let stdout = kernel.process.stdout.take();
cx.spawn(|_session, mut _cx| async move {
if stdout.is_none() {
return;
}
let reader = BufReader::new(stdout.unwrap());
let mut lines = reader.lines();
while let Some(Ok(line)) = lines.next().await {
log::info!("kernel: {}", line);
}
})
.detach();
let status = kernel.process.status();
session.kernel(Kernel::RunningKernel(Box::new(kernel)), cx);
let process_status_task = cx.spawn(|session, mut cx| async move {
let error_message = match status.await {
Ok(status) => {
if status.success() {
log::info!("kernel process exited successfully");
return;
}
format!("kernel process exited with status: {:?}", status)
}
Err(err) => {
format!("kernel process exited with error: {:?}", err)
}
};
log::error!("{}", error_message);
session
.update(&mut cx, |session, cx| {
session.kernel(
Kernel::ErroredLaunch(error_message.clone()),
cx,
);
session.blocks.values().for_each(|block| {
block.execution_view.update(
cx,
|execution_view, cx| {
match execution_view.status {
ExecutionStatus::Finished => {
// Do nothing when the output was good
}
_ => {
// All other cases, set the status to errored
execution_view.status =
ExecutionStatus::KernelErrored(
error_message.clone(),
)
}
}
cx.notify();
},
);
});
cx.notify();
})
.ok();
});
session.process_status_task = Some(process_status_task);
session.messaging_task = Some(cx.spawn(|session, mut cx| async move {
while let Some(message) = messages_rx.next().await {
session
.update(&mut cx, |session, cx| {
session.route(&message, cx);
})
.ok();
}
}));
// todo!(@rgbkrk): send KernelInfoRequest once our shell channel read/writes are split
// cx.spawn(|this, mut cx| async move {
// cx.background_executor()
// .timer(Duration::from_millis(120))
// .await;
// this.update(&mut cx, |this, cx| {
// this.send(KernelInfoRequest {}.into(), cx).ok();
// })
// .ok();
// })
// .detach();
session.kernel(Kernel::RunningKernel(kernel), cx);
})
.ok();
}
Err(err) => {
this.update(&mut cx, |session, cx| {
session.kernel(Kernel::ErroredLaunch(err.to_string()), cx);
session.kernel_errored(err.to_string(), cx);
})
.ok();
}
@@ -389,6 +308,26 @@ impl Session {
cx.notify();
}
pub fn kernel_errored(&mut self, error_message: String, cx: &mut ViewContext<Self>) {
self.kernel(Kernel::ErroredLaunch(error_message.clone()), cx);
self.blocks.values().for_each(|block| {
block.execution_view.update(cx, |execution_view, cx| {
match execution_view.status {
ExecutionStatus::Finished => {
// Do nothing when the output was good
}
_ => {
// All other cases, set the status to errored
execution_view.status =
ExecutionStatus::KernelErrored(error_message.clone())
}
}
cx.notify();
});
});
}
fn on_buffer_event(
&mut self,
buffer: Model<MultiBuffer>,
@@ -559,7 +498,7 @@ impl Session {
}
}
fn route(&mut self, message: &JupyterMessage, cx: &mut ViewContext<Self>) {
pub fn route(&mut self, message: &JupyterMessage, cx: &mut ViewContext<Self>) {
let parent_message_id = match message.parent_header.as_ref() {
Some(header) => &header.msg_id,
None => return,
@@ -646,12 +585,6 @@ impl Session {
// Give the kernel a bit of time to clean up
cx.background_executor().timer(Duration::from_secs(3)).await;
this.update(&mut cx, |session, _cx| {
session.messaging_task.take();
session.process_status_task.take();
})
.ok();
kernel.force_shutdown().ok();
this.update(&mut cx, |session, cx| {
@@ -664,8 +597,6 @@ impl Session {
.detach();
}
_ => {
self.messaging_task.take();
self.process_status_task.take();
self.kernel(Kernel::Shutdown, cx);
}
}
@@ -688,12 +619,6 @@ impl Session {
let message: JupyterMessage = ShutdownRequest { restart: true }.into();
request_tx.try_send(message).ok();
this.update(&mut cx, |session, _cx| {
session.messaging_task.take();
session.process_status_task.take();
})
.ok();
// Wait for kernel to shutdown
cx.background_executor().timer(Duration::from_secs(1)).await;
@@ -711,9 +636,6 @@ impl Session {
.detach();
}
_ => {
// If it's not already running, we can just clean up and start a new kernel
self.messaging_task.take();
self.process_status_task.take();
self.clear_outputs(cx);
self.start_kernel(cx);
}