src/backends/gcp.rs
audience: ai
Google Compute Engine backend. Structurally parallel to the AWS backend: per-region client cache, per-grant SSH key injection via instance metadata, cloud-init that fetches the requester’s image by post-hash.
Reports tdx_capable = true when the operator lists TDX-enabled Confidential-VM machine types in [gcp.tdx_machine_types] (e.g. "c3-standard-4"). Grants that declare manifest.tdx = Required are provisioned on those machine types with confidentialInstanceConfig set; non-TDX grants use the broader machine_families list.
//! Google Compute Engine backend.
//!
//! Structurally identical to the AWS backend: per-region
//! client cache, per-grant SSH key injection via
//! instance metadata, cloud-init that fetches the
//! requester's image by post-hash.
//!
//! GCP's Confidential VM offering (Intel TDX) is the
//! interesting capability here: operators can set
//! `[gcp.tdx_machine_types]` in their boot config to
//! declare which GCE machine types are TDX-enabled, and
//! the backend will report `tdx_capable = true`.
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use coalition_compute::{AlmanacTick, ComputeGrant, UsageMetrics};
use crate::backends::{Backend, Capabilities, ProvisionedInstance};
use crate::config::GcpBootConfig;
use crate::zipnet_io::Envelope;
/// Google Compute Engine backend (prototype).
///
/// Holds the operator-supplied boot configuration; `capabilities()`
/// derives TDX support from it. Per-region client caching is still
/// a TODO (see field comment below).
pub struct GcpBackend {
    /// Operator boot config: regions, machine families, and the
    /// TDX-enabled machine-type list read by `capabilities()`.
    cfg: GcpBootConfig,
    // TODO: cache google_cloud_* Compute clients per region.
}
impl GcpBackend {
pub async fn new(cfg: &GcpBootConfig) -> anyhow::Result<Self> {
Ok(Self { cfg: cfg.clone() })
}
}
#[async_trait]
impl Backend for GcpBackend {
    fn name(&self) -> &'static str {
        "gcp"
    }

    /// Advertises what this backend can do. TDX capability is derived
    /// purely from config: any entry in `tdx_machine_types` means the
    /// operator has declared TDX-capable machine types.
    async fn capabilities(&self) -> anyhow::Result<Capabilities> {
        let caps = Capabilities {
            regions: self.cfg.regions.clone(),
            tdx_capable: !self.cfg.tdx_machine_types.is_empty(),
            // No per-backend sizing limits are enforced yet; report
            // "unbounded" and let upstream admission control decide.
            max_cpu_millicores: u32::MAX,
            max_ram_mib: u32::MAX,
        };
        Ok(caps)
    }

    /// Optimistically accepts every grant for now.
    // TODO: consult manifest: if tdx_required but
    // tdx_machine_types is empty, return false.
    fn can_satisfy(&self, _grant: &ComputeGrant<'_>) -> bool {
        true
    }

    async fn provision(
        &self,
        _grant: &ComputeGrant<'_>,
        _envelope: &Envelope,
    ) -> anyhow::Result<ProvisionedInstance> {
        // Intended flow once implemented:
        //   1. Resolve the target region (envelope hint or fallback).
        //   2. Pick a machine_type: the smallest entry in
        //      self.cfg.machine_families satisfying the grant's
        //      CPU/RAM; if the grant requires TDX, restrict the
        //      search to self.cfg.tdx_machine_types.
        //   3. Generate a per-grant ed25519 SSH key.
        //   4. compute.instances.insert({
        //        name: per-grant,
        //        machineType, confidentialInstanceConfig (if TDX),
        //        networkInterfaces: [ephemeral public IP],
        //        metadata: { ssh-keys, startup-script },
        //        shieldedInstanceConfig,
        //      })
        //   5. Poll until RUNNING.
        //   6. Return ProvisionedInstance.
        anyhow::bail!(
            "GcpBackend::provision is a prototype stub; implement via \
             the google-cloud-googleapis Compute Engine client"
        )
    }

    async fn watch_until_exit(
        &self,
        _instance: &ProvisionedInstance,
        _valid_to: AlmanacTick,
    ) -> anyhow::Result<UsageMetrics> {
        anyhow::bail!("GcpBackend::watch_until_exit is a prototype stub")
    }

    /// No-op until provisioning exists: there is nothing to tear down.
    async fn terminate(&self, _instance: &ProvisionedInstance) -> anyhow::Result<()> {
        Ok(())
    }
}
Up: compute-bridge → backends.