src/main.rs
audience: ai
Entry point. Loads boot config, produces the TDX
self-quote, connects to the mosaik universe, resolves the coalition's Compute module, opens the zipnet
the coalition’s Compute module, opens the zipnet
channel, builds the provider-agnostic
Fleet from the operator’s enabled
backends, and hands everything to the provider loop.
//! compute-bridge — a TDX-attested Compute-module
//! provider that provisions workloads across AWS, GCP,
//! Azure, and bare-metal backends.
//!
//! Prototype. See README.md for status.
mod backends;
mod config;
mod dashboard;
mod provider;
mod receipt;
mod tdx;
mod zipnet_io;
use std::sync::Arc;
use anyhow::Context;
use coalition::CoalitionConfig;
use mosaik::Network;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Structured logging. `RUST_LOG` (the default env filter) wins when
    // set; otherwise keep our own crate at debug and dependencies at info.
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| "info,compute_bridge=debug".into()),
        )
        .init();

    // 1. Load boot config (per-backend credentials,
    //    coalition reference, operator knobs).
    let cfg = config::BootConfig::load_from_env()
        .context("loading boot config")?;
    tracing::info!(
        coalition = %cfg.coalition.name,
        backends = %summarise_backends(&cfg.backends),
        "compute-bridge starting",
    );

    // 2. Produce the TDX attestation. Without a matching
    //    MR_TD the provider card the coalition sees will
    //    not match this binary and the Compute committee
    //    will reject registration.
    let tdx_quote = tdx::self_quote().await
        .context("producing TDX self-quote")?;

    // 3. Bring up the shared mosaik network handle.
    //    Arc'd because the provider and dashboard both hold it.
    let network = Arc::new(
        Network::new(mosaik::builder::UNIVERSE).await
            .context("connecting to mosaik universe")?,
    );

    // 4. Resolve the Compute module on the coalition the
    //    operator points us at. A coalition without a
    //    Compute module is fatal — nothing to provide for.
    let compute_cfg = cfg.coalition.compute()
        .context("coalition does not ship a Compute module")?;

    // 5. Zipnet channel for anonymised request/reply.
    let zipnet = zipnet_io::ZipnetChannel::open(
        &network,
        &cfg.zipnet,
    ).await?;

    // 6. Build the provider-agnostic Fleet from the
    //    operator's enabled backends. At least one must
    //    be configured.
    let fleet = backends::Fleet::from_boot_config(&cfg.backends).await?;

    // 7. Spawn the operator dashboard on localhost.
    //    Binds only to 127.0.0.1; access is via the
    //    operator's SSH tunnel into the TDX guest.
    //    Shows aggregate state only — never the
    //    requester identity, prompt, or image contents.
    //    (Uses the `Arc` imported at the top of the file,
    //    consistent with step 3.)
    let dashboard = Arc::new(
        dashboard::Dashboard::new(&cfg.dashboard, fleet.clone()),
    );
    tokio::spawn({
        let d = dashboard.clone();
        async move {
            // Dashboard failure is deliberately non-fatal: we log and
            // keep provisioning without the local status view.
            if let Err(err) = d.spawn().await {
                tracing::warn!(error = %err, "dashboard failed to start");
            }
        }
    });

    // 8. Provider loop: register the provider card,
    //    accept grants, provision, return receipts,
    //    emit usage logs. Reports observable events
    //    to the dashboard as it runs. Runs until error
    //    or shutdown; its result is the process result.
    let provider = provider::Provider::new(
        network.clone(),
        compute_cfg,
        tdx_quote,
        zipnet,
        fleet,
        cfg.provider.clone(),
        dashboard,
    );
    provider.run().await
}
/// Render the set of enabled backends as a comma-separated list
/// (e.g. "aws,gcp") for the startup log line. Backends whose boot
/// config is absent are omitted; the result is empty when none
/// are configured.
fn summarise_backends(cfg: &config::BackendsBootConfig) -> String {
    // Declarative name/enabled table instead of sequential pushes;
    // ordering matches the original: aws, gcp, azure, baremetal.
    let candidates = [
        ("aws", cfg.aws.is_some()),
        ("gcp", cfg.gcp.is_some()),
        ("azure", cfg.azure.is_some()),
        ("baremetal", cfg.baremetal.is_some()),
    ];
    candidates
        .iter()
        .filter_map(|&(name, enabled)| enabled.then_some(name))
        .collect::<Vec<_>>()
        .join(",")
}
Up: compute-bridge.