From 03a7cc56055022d1b45f06a211b258139c1b3805 Mon Sep 17 00:00:00 2001
From: James Andariese
Date: Wed, 1 Nov 2023 18:54:29 -0500
Subject: [PATCH] clean up

---
 src/main.rs | 123 ++++++++++++++++++++++++++++---------------------------
 1 file changed, 60 insertions(+), 63 deletions(-)

diff --git a/src/main.rs b/src/main.rs
index 04cf191..560e665 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -256,8 +256,12 @@ async fn setup_pod(ctx: &mut AppContext) -> Result<(Api<Pod>, String)> {
     let existing_pvc = pvcs_api.get_opt(kube_pvc).await?;
     if let None = existing_pvc {
         info!("pvc doesn't exist yet. creating now.");
-        let mut repo_pvc = PersistentVolumeClaim {
-            metadata: ObjectMeta::default(),
+        let repo_pvc = PersistentVolumeClaim {
+            metadata: ObjectMeta{
+                name: Some(kube_pvc.to_owned()),
+                namespace: Some(kube_ns.to_owned()),
+                ..ObjectMeta::default()
+            },
             spec: Some(PersistentVolumeClaimSpec {
                 access_modes: Some(vec!["ReadWriteOnce".to_owned()]),
                 resources: Some(ResourceRequirements {
@@ -274,10 +278,6 @@ async fn setup_pod(ctx: &mut AppContext) -> Result<(Api<Pod>, String)> {
             }),
             status: None,
         };
-        let mut meta = ObjectMeta::default();
-        meta.name = Some(kube_pvc.to_owned());
-        meta.namespace = Some(kube_ns.to_owned());
-        repo_pvc.metadata = meta;
         let pp = PostParams::default();
         let created_pvc = pvcs_api.create(&pp, &repo_pvc).await?;
         debug!("created pvc: {created_pvc:#?}");
@@ -285,54 +285,61 @@ async fn setup_pod(ctx: &mut AppContext) -> Result<(Api<Pod>, String)> {
     debug!("{:#?}", existing_pvc);
 
     // create the worker pod
-    let mut worker_pod = Pod::default();
-    worker_pod.metadata.name = Some(kube_worker_name.to_owned());
-    worker_pod.metadata.namespace = Some(kube_ns.to_owned());
-    {
-        let mut labels = BTreeMap::new();
-        for (k, v) in kube_pod_labels.iter() {
-            let kk = k.to_owned().to_owned();
-            let vv = v.to_owned().to_owned();
-            labels.insert(kk, vv);
-        }
-        worker_pod.metadata.labels = Some(labels);
-    }
-    {
-        let mut spec = PodSpec::default();
-        spec.restart_policy = Some("Never".to_owned());
-        {
-            let mut container = Container::default();
-            container.name = KUBE_CONTAINER_NAME.to_owned();
-            container.command = Some(vec![
-                kube_shell_executable.to_owned(),
-                kube_shell_parameters.to_owned(),
-                kube_shell_sleeper_command.to_owned(),
-            ]);
-            container.image = Some(kube_image.to_owned());
-            container.working_dir = Some(kube_repo_mount_path.to_owned());
-            container.image_pull_policy = Some("IfNotPresent".to_owned());
-            {
-                let mut volume_mount = VolumeMount::default();
-                volume_mount.mount_path = kube_repo_mount_path.to_owned();
-                volume_mount.name = "repo".to_owned();
-                container.volume_mounts = Some(vec![volume_mount]);
-            }
-            spec.containers = vec![container];
-        }
-        {
-            let mut volume = Volume::default();
-            volume.name = "repo".to_owned();
-            {
-                let mut pvcs = PersistentVolumeClaimVolumeSource::default();
-                pvcs.claim_name = kube_pvc.to_owned();
-                volume.persistent_volume_claim = Some(pvcs);
-            }
-            spec.volumes = Some(vec![volume]);
-        }
-        worker_pod.spec = Some(spec);
-    }
-    // debug!("Pod: {:?}", worker_pod);
+    let worker_pod = Pod{
+        metadata: ObjectMeta{
+            name: Some(kube_worker_name.to_owned()),
+            namespace: Some(kube_ns.to_owned()),
+            labels: Some({
+                let mut labels = BTreeMap::new();
+                for (k, v) in kube_pod_labels.iter() {
+                    let kk = k.to_owned().to_owned();
+                    let vv = v.to_owned().to_owned();
+                    labels.insert(kk, vv);
+                }
+                labels
+            }),
+            ..ObjectMeta::default()
+        },
+        spec: Some(PodSpec {
+            restart_policy: Some("Never".to_owned()),
+            containers: vec![Container{
+                name: KUBE_CONTAINER_NAME.to_owned(),
+                command: Some(vec![
+                    kube_shell_executable.to_owned(),
+                    kube_shell_parameters.to_owned(),
+                    kube_shell_sleeper_command.to_owned(),
+                ]),
+                image: Some(kube_image.to_owned()),
+                working_dir: Some(kube_repo_mount_path.to_owned()),
+                image_pull_policy: Some("IfNotPresent".to_owned()),
+                volume_mounts: Some(vec![
+                    VolumeMount{
+                        mount_path: kube_repo_mount_path.to_owned(),
+                        name: "repo".to_owned(),
+                        ..VolumeMount::default()
+                    }
+                ]),
+                ..Container::default()
+            }],
+            volumes: Some(vec![
+                Volume{
+                    name: "repo".to_owned(),
+                    persistent_volume_claim: Some(
+                        PersistentVolumeClaimVolumeSource {
+                            claim_name: kube_pvc.to_owned(),
+                            read_only: Some(false),
+                            ..PersistentVolumeClaimVolumeSource::default()
+                        }
+                    ),
+                    ..Volume::default()
+                },
+            ]),
+            ..PodSpec::default()
+        }),
+        ..Pod::default()
+    };
+
 
     let mut lp = ListParams::default();
     let mut ls: String = String::with_capacity(kube_pod_labels.len() * 100);
     for (k, v) in kube_pod_labels {
@@ -345,7 +352,6 @@ async fn setup_pod(ctx: &mut AppContext) -> Result<(Api<Pod>, String)> {
     debug!("list params: {lp:#?}");
 
     let worker_pods = pods_api.list(&lp).await?;
-    // debug!("worker_pods: {worker_pods:#?}");
 
     // 1: if there is >1 pod, bail
     // 2: if there is 1 pod and that pod is running or pending, use it
@@ -433,13 +439,6 @@ async fn do_git(ctx: &AppContext, pods_api: Api<Pod>, kube_worker_name: String)
     let mut ttyout = tokio::io::stdout();
     let mut ttyin = tokio::io::stdin();
 
-    // tokio::spawn(async {
-    //     loop {
-    //         sleep(Duration::from_secs(1)).await;
-    //         debug!("ping");
-    //     };
-    // }.instrument(error_span!("pinger")));
-
     let connect_cmd = negotiate_git_protocol(&mut ttyout, &mut ttyin)
         .await?
         .ok_or(anyhow!(
@@ -454,7 +453,6 @@ async fn do_git(ctx: &AppContext, pods_api: Api<Pod>, kube_worker_name: String)
         .stdout(true)
         .stderr(true)
         .container(KUBE_CONTAINER_NAME.to_owned());
-    // let (ready_tx, ready_rx) = oneshot::channel::<()>();
     let mut stuff = pods_api
         .exec(&kube_worker_name, vec!["sh", "-c", &gitcommand], &ap)
         .await?;
@@ -469,7 +467,6 @@ async fn do_git(ctx: &AppContext, pods_api: Api<Pod>, kube_worker_name: String)
         .stderr()
         .ok_or(ApplicationError::PodCouldNotOpenStderr)?;
     let mut poderr = tokio_util::io::ReaderStream::new(poderr);
-    // ready_tx.send(()).expect("failed to send ready check");
 
     let barrier = Arc::new(tokio::sync::Barrier::new(4));
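
Note: the clean-up above replaces field-by-field mutation of Default::default() values with
struct literals that name only the interesting fields and let Rust's struct update syntax
(..Default::default()) supply the rest. The following is a minimal, self-contained sketch of
that pattern; the types, field names, and values here are simplified stand-ins chosen for
illustration, not the real k8s_openapi structs used in src/main.rs.

    use std::collections::BTreeMap;

    // Simplified stand-ins for the Kubernetes API types, only to show the pattern.
    #[derive(Debug, Default)]
    struct ObjectMeta {
        name: Option<String>,
        namespace: Option<String>,
        labels: Option<BTreeMap<String, String>>,
    }

    #[derive(Debug, Default)]
    struct Pod {
        metadata: ObjectMeta,
        spec: Option<String>, // the real field holds a PodSpec; a String keeps the sketch short
    }

    fn main() {
        // Before: start from a default value and mutate it field by field.
        let mut old_style = Pod::default();
        old_style.metadata.name = Some("worker".to_owned());
        old_style.metadata.namespace = Some("default".to_owned());

        // After: one struct literal per value; unspecified fields fall back to their
        // defaults via struct update syntax, and the binding no longer needs `mut`.
        let new_style = Pod {
            metadata: ObjectMeta {
                name: Some("worker".to_owned()),
                namespace: Some("default".to_owned()),
                ..ObjectMeta::default()
            },
            ..Pod::default()
        };

        println!("{old_style:?}\n{new_style:?}");
    }

The literal form keeps every non-default field visible in one place and lets the compiler
reject misspelled field names, which is what the diff does for Pod, ObjectMeta, PodSpec,
Container, VolumeMount, Volume, and PersistentVolumeClaim.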