Compare commits

...

4 commits

d7f4d877b6 · 2025-01-14 15:07:54 +01:00
feat(hostd): varlink interfaced host controller to manage machine configuration and boot mgmt
ci/woodpecker/pr/ci: pipeline successful

092a66a3fb · 2025-01-14 14:25:52 +01:00
feat: Add user resource w/database as storage (#7)
ci/woodpecker/push/ci: pipeline successful

The User resource itself is more or less a placeholder for now,
but the idea is to get a proper end-to-end example to build off of, with the basics in place:

- [x] sqlx for database queries
- [x] sqlx schema migration
- [x] type-safe dropshot API

Reviewed-on: #7

9b7e1fb226 · 2025-01-14 14:24:35 +01:00
feat: Add user resource w/database as storage
ci/woodpecker/pr/ci: pipeline successful

3b04b82998 · 2025-01-12 14:38:08 +01:00
chore(clippy): cleanup
ci/woodpecker/push/ci: pipeline successful
27 changed files with 1964 additions and 83 deletions

.cargo/audit.toml (new file, +8)

@@ -0,0 +1,8 @@
[advisories]
ignore = [
# Advisory about a vulnerability in rsa, which we don't use, but comes via sqlx due
# to a bug in cargo. For context, see:
# https://github.com/launchbadge/sqlx/issues/2911
# and https://github.com/rust-lang/cargo/issues/10801
"RUSTSEC-2023-0071"
]

Cargo.lock (generated, 932 changed lines)

File diff suppressed because it is too large.

Cargo.toml

@@ -4,6 +4,7 @@ members = [
"agent",
"controller",
"instrumentation",
"hostd",
"trace-request",
"xtask",
]
@@ -32,6 +33,7 @@ clap = { version = "4.5.23", features = [
"string",
] }
dropshot = "0.15.1"
futures = "0.3"
http = "1.2.0"
once_cell = "1.20.2"
progenitor = "0.8.0"

(crate Cargo.toml; file name not shown)

@@ -7,6 +7,7 @@ version.workspace = true
[dependencies]
anyhow.workspace = true
clap.workspace = true
futures.workspace = true
instrumentation = { path = "../instrumentation" }
progenitor.workspace = true
reqwest.workspace = true
@@ -14,6 +15,7 @@ schemars.workspace = true
serde.workspace = true
tokio.workspace = true
tracing.workspace = true
uuid.workspace = true
[package.metadata.cargo-machete]
ignored = ["reqwest", "serde"]

api.json (135 changed lines)

@@ -5,6 +5,96 @@
"version": "1.0.0"
},
"paths": {
"/users": {
"get": {
"tags": [
"user"
],
"summary": "List users",
"operationId": "list_users",
"parameters": [
{
"in": "query",
"name": "limit",
"description": "Maximum number of items returned by a single call",
"schema": {
"nullable": true,
"type": "integer",
"format": "uint32",
"minimum": 1
}
},
{
"in": "query",
"name": "page_token",
"description": "Token returned by previous call to retrieve the subsequent page",
"schema": {
"nullable": true,
"type": "string"
}
}
],
"responses": {
"200": {
"description": "successful operation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/UserResultsPage"
}
}
}
},
"4XX": {
"$ref": "#/components/responses/Error"
},
"5XX": {
"$ref": "#/components/responses/Error"
}
},
"x-dropshot-pagination": {
"required": []
}
}
},
"/users/{userId}": {
"get": {
"tags": [
"user"
],
"summary": "Fetch user info.",
"operationId": "get_user_by_id",
"parameters": [
{
"in": "path",
"name": "userId",
"required": true,
"schema": {
"type": "string",
"format": "uuid"
}
}
],
"responses": {
"200": {
"description": "successful operation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/User"
}
}
}
},
"4XX": {
"$ref": "#/components/responses/Error"
},
"5XX": {
"$ref": "#/components/responses/Error"
}
}
}
},
"/version": {
"get": {
"summary": "Fetch version info.",
@@ -51,6 +141,44 @@
"request_id"
]
},
"User": {
"description": "User",
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uuid"
},
"name": {
"type": "string"
}
},
"required": [
"id",
"name"
]
},
"UserResultsPage": {
"description": "A single page of results",
"type": "object",
"properties": {
"items": {
"description": "list of items on this page of results",
"type": "array",
"items": {
"$ref": "#/components/schemas/User"
}
},
"next_page": {
"nullable": true,
"description": "token used to fetch the next page of results (if any)",
"type": "string"
}
},
"required": [
"items"
]
},
"VersionInfo": {
"description": "Version and build information",
"type": "object",
@@ -80,5 +208,10 @@
}
}
}
}
},
"tags": [
{
"name": "user"
}
]
}

controller/.sqlx query metadata (new file, +47)

@@ -0,0 +1,47 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM users WHERE id > coalesce($1, '00000000-0000-0000-0000-000000000000'::UUID) ORDER BY id LIMIT $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "name",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "time_deleted",
"type_info": "Timestamptz"
},
{
"ordinal": 3,
"name": "time_created",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "time_modified",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Uuid",
"Int8"
]
},
"nullable": [
false,
false,
true,
false,
false
]
},
"hash": "40dee0d539971f95bb3dc2ba4c49d5910bfdb2a6c9b82ddb296854973369594c"
}

controller/.sqlx query metadata (new file, +46)

@@ -0,0 +1,46 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM users WHERE id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "name",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "time_deleted",
"type_info": "Timestamptz"
},
{
"ordinal": 3,
"name": "time_created",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "time_modified",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": [
false,
false,
true,
false,
false
]
},
"hash": "843923b9a0257cf80f1dff554e7dc8fdfc05f489328e8376513124dfb42996e3"
}

controller/Cargo.toml

@@ -15,10 +15,14 @@ schemars.workspace = true
serde.workspace = true
slog-async.workspace = true
slog.workspace = true
sqlx = { version = "0.8.3", default-features = false, features = [
"macros", "migrate", "postgres", "runtime-tokio", "tls-rustls", "time", "uuid"
] }
tokio.workspace = true
trace-request = { path = "../trace-request" }
tracing-slog.workspace = true
tracing.workspace = true
uuid.workspace = true
[package.metadata.cargo-machete]
ignored = ["http"]

controller/build.rs (new file, +5)

@@ -0,0 +1,5 @@
// generated by `sqlx migrate build-script`
fn main() {
// trigger recompilation when a new migration is added
println!("cargo:rerun-if-changed=migrations");
}

controller/migrations (new migration, +7; file name not shown)

@@ -0,0 +1,7 @@
CREATE TABLE IF NOT EXISTS patagia.public.Users(
id UUID PRIMARY KEY,
name VARCHAR(63) NOT NULL,
time_deleted TIMESTAMP WITH TIME ZONE, -- non-NULL if deleted
time_created TIMESTAMP WITH TIME ZONE NOT NULL,
time_modified TIMESTAMP WITH TIME ZONE NOT NULL
);
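
The time_deleted column supports soft deletes (non-NULL once a row is deleted), but the queries introduced in this change select rows regardless of it. A minimal sketch of a soft-delete-aware lookup, assuming the same sqlx setup; `get_live_user` is a hypothetical helper, not part of this diff:

```rust
use sqlx::postgres::PgPool;
use uuid::Uuid;

// Hypothetical helper: fetch a user only if it has not been soft-deleted.
// `id` and `name` mirror the columns created by the migration above.
async fn get_live_user(pg: &PgPool, id: Uuid) -> sqlx::Result<Option<(Uuid, String)>> {
    let rec = sqlx::query!(
        r#"SELECT id, name FROM users WHERE id = $1 AND time_deleted IS NULL"#,
        id
    )
    .fetch_optional(pg)
    .await?;
    Ok(rec.map(|r| (r.id, r.name)))
}
```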

controller/src/api.rs

@@ -4,12 +4,14 @@ use dropshot::ApiDescription;
use std::sync::Arc;
use crate::context::ControllerContext;
use crate::user;
use crate::version;
type ControllerApiDescription = ApiDescription<Arc<ControllerContext>>;
pub fn api() -> Result<ControllerApiDescription> {
let mut api = ControllerApiDescription::new();
user::register_api(&mut api)?;
api.register(version::version)?;
Ok(api)
}

controller/src/main.rs

@@ -1,8 +1,8 @@
use anyhow::{anyhow, Result};
use clap::Parser;
use dropshot::{ConfigDropshot, ServerBuilder};
use slog::Drain;
use sqlx::postgres::PgPool;
use tracing_slog::TracingSlogDrain;
use std::net::SocketAddr;
@@ -36,6 +36,13 @@ struct Cli {
env = "LISTEN_ADDRESS"
)]
listen_address: String,
#[arg(
long = "database-url",
default_value = "postgresql://localhost/patagia",
env = "DATABASE_URL"
)]
database_url: Option<String>,
}
#[tokio::main]
@@ -57,7 +64,19 @@ async fn main() -> Result<()> {
slog::Logger::root(async_drain, slog::o!())
};
let ctx = ControllerContext::new();
let database_url = args.database_url.unwrap();
tracing::info!(
database_url,
listen_address = args.listen_address,
"Starting server"
);
let pg = PgPool::connect(&database_url).await?;
sqlx::migrate!().run(&pg).await?;
let ctx = ControllerContext::new(pg);
let api = api::api()?;
ServerBuilder::new(api, Arc::new(ctx), logger)
.config(config)

controller/src/context.rs

@@ -1,13 +1,11 @@
pub struct ControllerContext {}
use sqlx::postgres::PgPool;
pub struct ControllerContext {
pub pg_pool: PgPool,
}
impl ControllerContext {
pub fn new() -> ControllerContext {
ControllerContext {}
}
}
impl Default for ControllerContext {
fn default() -> Self {
Self::new()
pub fn new(pg_pool: PgPool) -> ControllerContext {
ControllerContext { pg_pool }
}
}

controller/src/lib.rs

@@ -1,4 +1,5 @@
pub mod api;
pub mod context;
mod user;
mod version;

controller/src/user/api.rs (new file, +110)

@@ -0,0 +1,110 @@
use dropshot::{
endpoint, EmptyScanParams, HttpError, HttpResponseOk, PaginationParams, Path, Query,
RequestContext, ResultsPage, WhichPage,
};
use dropshot::{ApiDescription, ApiDescriptionRegisterError};
use schemars::JsonSchema;
use serde::Deserialize;
use serde::Serialize;
use trace_request::trace_request;
use uuid::Uuid;
use std::sync::Arc;
use super::User;
use crate::context::ControllerContext;
#[derive(Deserialize, JsonSchema, Serialize)]
#[serde(rename_all = "camelCase")]
struct UsersPathParams {
user_id: Uuid,
}
#[derive(Deserialize, JsonSchema, Serialize)]
#[serde(rename_all = "camelCase")]
struct UserPage {
user_id: Uuid,
}
pub fn register_api(
api: &mut ApiDescription<Arc<ControllerContext>>,
) -> Result<(), ApiDescriptionRegisterError> {
api.register(get_user_by_id)?;
api.register(list_users)
}
/// Fetch user info.
#[endpoint {
method = GET,
path = "/users/{userId}",
tags = [ "user" ],
}]
#[trace_request]
async fn get_user_by_id(
rqctx: RequestContext<Arc<ControllerContext>>,
params: Path<UsersPathParams>,
) -> Result<HttpResponseOk<User>, HttpError> {
let id = params.into_inner().user_id;
tracing::debug!(id = id.to_string(), "Getting user by id");
let pg = rqctx.context().pg_pool.to_owned();
let rec = sqlx::query!(r#"SELECT * FROM users WHERE id = $1"#, id)
.fetch_one(&pg)
.await
.map_err(|e| match e {
sqlx::Error::RowNotFound => {
HttpError::for_not_found(None, format!("User not found by id: {:?}", id))
}
err => HttpError::for_internal_error(format!("Error: {}", err)),
})?;
let user = User {
id: rec.id,
name: rec.name,
};
Ok(HttpResponseOk(user))
}
/// List users
#[endpoint {
method = GET,
path = "/users",
tags = [ "user" ],
}]
#[trace_request]
async fn list_users(
rqctx: RequestContext<Arc<ControllerContext>>,
query: Query<PaginationParams<EmptyScanParams, UserPage>>,
) -> Result<HttpResponseOk<ResultsPage<User>>, HttpError> {
let pag_params = query.into_inner();
let limit = rqctx.page_limit(&pag_params)?.get() as i64;
let pg = rqctx.context().pg_pool.to_owned();
let last_seen = match &pag_params.page {
WhichPage::Next(UserPage { user_id: id }) => Some(id),
_ => None,
};
let users = sqlx::query!(
r#"SELECT * FROM users WHERE id > coalesce($1, '00000000-0000-0000-0000-000000000000'::UUID) ORDER BY id LIMIT $2"#,
last_seen,
limit
)
.fetch_all(&pg)
.await
.map_err(|e| HttpError::for_internal_error(format!("Error: {}", e)))?
.into_iter()
.map(|rec| User {
id: rec.id,
name: rec.name,
})
.collect();
Ok(HttpResponseOk(ResultsPage::new(
users,
&EmptyScanParams {},
|u: &User, _| UserPage { user_id: u.id },
)?))
}
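
The pagination here is keyset-based: list_users orders by id and resumes after the last-seen id, using the nil UUID as a sentinel when no page token is given. On the client side, the UserResultsPage contract in api.json means resubmitting next_page as page_token until it comes back absent. A rough sketch using reqwest and serde (hypothetical choices; a progenitor-generated client would normally be used, and the base URL is an assumption):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct User {
    id: uuid::Uuid,
    name: String,
}

#[derive(Deserialize)]
struct UserResultsPage {
    items: Vec<User>,
    next_page: Option<String>,
}

// Fetch all users, one page at a time, following next_page tokens.
async fn fetch_all_users(base: &str) -> reqwest::Result<Vec<User>> {
    let client = reqwest::Client::new();
    let mut users = Vec::new();
    let mut page_token: Option<String> = None;
    loop {
        let mut req = client.get(format!("{base}/users")).query(&[("limit", "100")]);
        if let Some(t) = &page_token {
            req = req.query(&[("page_token", t.as_str())]);
        }
        let page: UserResultsPage = req.send().await?.error_for_status()?.json().await?;
        users.extend(page.items);
        match page.next_page {
            Some(t) => page_token = Some(t),
            None => return Ok(users),
        }
    }
}
```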

controller/src/user/mod.rs (new file, +14)

@@ -0,0 +1,14 @@
use schemars::JsonSchema;
use serde::Serialize;
use uuid::Uuid;
mod api;
pub use self::api::register_api;
/// User
#[derive(Serialize, JsonSchema)]
struct User {
id: Uuid,
name: String,
}

flake.nix

@@ -49,6 +49,8 @@
root = ./.;
fileset = pkgs.lib.fileset.unions [
./api.json
./controller/.sqlx
./controller/migrations
(craneLib.fileset.commonCargoSources ./.)
];
};
@@ -116,6 +118,7 @@
formatter =
(treefmt-nix.lib.evalModule pkgs {
projectRootFile = "flake.nix";
programs = {
nixfmt.enable = true;
nixfmt.package = pkgs.nixfmt-rfc-style;
@@ -141,6 +144,8 @@
just
nixfmt-rfc-style
rust-dev-toolchain
sqls
sqlx-cli
watchexec
]
++ commonArgs.buildInputs;

hostd/.gitignore (vendored, new file, +1)

@@ -0,0 +1 @@
/target

hostd/Cargo.toml (new file, +12)

@@ -0,0 +1,12 @@
[package]
name = "hostd"
version.workspace = true
edition.workspace = true
[dependencies]
anyhow.workspace = true
tokio.workspace = true
varlink = "11.0.1"
[build-dependencies]
varlink_generator = "10.1.0"

hostd/build.rs (new file, +6)

@@ -0,0 +1,6 @@
extern crate varlink_generator;
fn main() {
varlink_generator::cargo_build_tosource("src/io.systemd.Hostname.varlink", true);
varlink_generator::cargo_build_tosource("src/io.patagia.Hostd.varlink", true);
}

hostd/src/io.patagia.Hostd.varlink (new file, +27)

@@ -0,0 +1,27 @@
interface io.patagia.Hostd
type Label (
key: string,
value: string
)
type PatagiaAgentConfig (
url: string,
extraMounts: [string]string
)
type Machine(
machineId: string,
nodeLabels: ?[]Label,
patagiaAgent: ?PatagiaAgentConfig
)
method Describe() -> (
machine: Machine
)
method Apply(
machine: Machine
) -> ()
error InvalidMachineConfig()
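
A minimal sketch of how hostd might serve this interface through the generated bindings (the generated code appears below as hostd/src/io_patagia_hostd.rs); the module wiring, socket address, and stub values are assumptions, since hostd/src/main.rs is still a hello-world placeholder:

```rust
mod io_patagia_hostd; // generated by hostd/build.rs (module name assumed)

use io_patagia_hostd::{Call_Apply, Call_Describe, Machine, VarlinkInterface};

struct Hostd;

impl VarlinkInterface for Hostd {
    fn describe(&self, call: &mut dyn Call_Describe) -> varlink::Result<()> {
        // Reply with a stub machine description.
        call.reply(Machine {
            r#machineId: "machine-0".into(), // placeholder value
            r#nodeLabels: None,
            r#patagiaAgent: None,
        })
    }

    fn apply(&self, call: &mut dyn Call_Apply, _machine: Machine) -> varlink::Result<()> {
        // A real implementation would validate and apply the configuration.
        call.reply()
    }
}

fn main() -> varlink::Result<()> {
    let service = varlink::VarlinkService::new(
        "io.patagia",          // vendor
        "hostd",               // product
        "0.1.0",               // version
        "https://example.org", // URL (placeholder)
        vec![Box::new(io_patagia_hostd::new(Box::new(Hostd)))],
    );
    // Socket address is an assumption.
    varlink::listen(
        service,
        "unix:/run/patagia/hostd.sock",
        &varlink::ListenConfig::default(),
    )
}
```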

hostd/src/io.systemd.Hostname.varlink (new file, +32)

@@ -0,0 +1,32 @@
interface io.systemd.Hostname
method Describe() -> (
Hostname: string,
StaticHostname: ?string,
PrettyHostname: ?string,
DefaultHostname: ?string,
HostnameSource: string,
IconName: ?string,
Chassis: ?string,
Deployment: ?string,
Location: ?string,
KernelName: string,
KernelRelease: string,
KernelVersion: string,
OperatingSystemPrettyName: ?string,
OperatingSystemCPEName: ?string,
OperatingSystemHomeURL: ?string,
OperatingSystemSupportEnd: ?int,
OperatingSystemReleaseData: ?[]string,
MachineInformationData: ?[]string,
HardwareVendor: ?string,
HardwareModel: ?string,
HardwareSerial: ?string,
FirmwareVersion: ?string,
FirmwareVendor: ?string,
FirmwareDate: ?int,
MachineID: string,
BootID: string,
ProductUUID: ?string,
VSockCID: ?int
)
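
This mirrors systemd-hostnamed's varlink interface, which gives hostd a way to read host facts. A sketch of a client call through the generated bindings; the socket path and module name are assumptions:

```rust
mod io_systemd_hostname; // generated by hostd/build.rs (module name assumed)

use io_systemd_hostname::{VarlinkClient, VarlinkClientInterface};

fn main() -> anyhow::Result<()> {
    // systemd exposes this interface on a well-known socket (path assumed).
    let connection =
        varlink::Connection::with_address("unix:/run/systemd/io.systemd.Hostname")?;
    let mut client = VarlinkClient::new(connection);
    let reply = client.describe().call()?;
    println!("hostname={} machine-id={}", reply.r#Hostname, reply.r#MachineID);
    Ok(())
}
```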

hostd/src/io_patagia_hostd.rs (generated, new file, +264)

@@ -0,0 +1,264 @@
#![doc = "This file was automatically generated by the varlink rust generator"]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use serde_derive::{Deserialize, Serialize};
use std::io::BufRead;
use std::sync::{Arc, RwLock};
use varlink::{self, CallTrait};
#[allow(dead_code)]
#[derive(Clone, PartialEq, Debug)]
#[allow(clippy::enum_variant_names)]
pub enum ErrorKind {
Varlink_Error,
VarlinkReply_Error,
InvalidMachineConfig(Option<InvalidMachineConfig_Args>),
}
impl ::std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match self {
ErrorKind::Varlink_Error => write!(f, "Varlink Error"),
ErrorKind::VarlinkReply_Error => write!(f, "Varlink error reply"),
ErrorKind::InvalidMachineConfig(v) => {
write!(f, "io.patagia.Hostd.InvalidMachineConfig: {:#?}", v)
}
}
}
}
pub struct Error(
pub ErrorKind,
pub Option<Box<dyn std::error::Error + 'static + Send + Sync>>,
pub Option<&'static str>,
);
impl Error {
#[allow(dead_code)]
pub fn kind(&self) -> &ErrorKind {
&self.0
}
}
impl From<ErrorKind> for Error {
fn from(e: ErrorKind) -> Self {
Error(e, None, None)
}
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.1
.as_ref()
.map(|e| e.as_ref() as &(dyn std::error::Error + 'static))
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(&self.0, f)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use std::error::Error as StdError;
if let Some(ref o) = self.2 {
std::fmt::Display::fmt(o, f)?;
}
std::fmt::Debug::fmt(&self.0, f)?;
if let Some(e) = self.source() {
std::fmt::Display::fmt("\nCaused by:\n", f)?;
std::fmt::Debug::fmt(&e, f)?;
}
Ok(())
}
}
#[allow(dead_code)]
pub type Result<T> = std::result::Result<T, Error>;
impl From<varlink::Error> for Error {
fn from(e: varlink::Error) -> Self {
match e.kind() {
varlink::ErrorKind::VarlinkErrorReply(r) => Error(
ErrorKind::from(r),
Some(Box::from(e)),
Some(concat!(file!(), ":", line!(), ": ")),
),
_ => Error(
ErrorKind::Varlink_Error,
Some(Box::from(e)),
Some(concat!(file!(), ":", line!(), ": ")),
),
}
}
}
#[allow(dead_code)]
impl Error {
pub fn source_varlink_kind(&self) -> Option<&varlink::ErrorKind> {
use std::error::Error as StdError;
let mut s: &dyn StdError = self;
while let Some(c) = s.source() {
let k = self
.source()
.and_then(|e| e.downcast_ref::<varlink::Error>())
.map(|e| e.kind());
if k.is_some() {
return k;
}
s = c;
}
None
}
}
impl From<&varlink::Reply> for ErrorKind {
#[allow(unused_variables)]
fn from(e: &varlink::Reply) -> Self {
match e {
varlink::Reply {
error: Some(ref t), ..
} if t == "io.patagia.Hostd.InvalidMachineConfig" => match e {
varlink::Reply {
parameters: Some(p),
..
} => match serde_json::from_value(p.clone()) {
Ok(v) => ErrorKind::InvalidMachineConfig(v),
Err(_) => ErrorKind::InvalidMachineConfig(None),
},
_ => ErrorKind::InvalidMachineConfig(None),
},
_ => ErrorKind::VarlinkReply_Error,
}
}
}
pub trait VarlinkCallError: varlink::CallTrait {
fn reply_invalid_machine_config(&mut self) -> varlink::Result<()> {
self.reply_struct(varlink::Reply::error(
"io.patagia.Hostd.InvalidMachineConfig",
None,
))
}
}
impl<'a> VarlinkCallError for varlink::Call<'a> {}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct r#Label {
pub r#key: String,
pub r#value: String,
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct r#Machine {
pub r#machineId: String,
pub r#nodeLabels: Option<Vec<Label>>,
pub r#patagiaAgent: Option<PatagiaAgentConfig>,
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct r#PatagiaAgentConfig {
pub r#url: String,
pub r#extraMounts: varlink::StringHashMap<String>,
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct InvalidMachineConfig_Args {}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct Apply_Reply {}
impl varlink::VarlinkReply for Apply_Reply {}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct Apply_Args {
pub r#machine: Machine,
}
pub trait Call_Apply: VarlinkCallError {
fn reply(&mut self) -> varlink::Result<()> {
self.reply_struct(varlink::Reply::parameters(None))
}
}
impl<'a> Call_Apply for varlink::Call<'a> {}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct Describe_Reply {
pub r#machine: Machine,
}
impl varlink::VarlinkReply for Describe_Reply {}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct Describe_Args {}
pub trait Call_Describe: VarlinkCallError {
fn reply(&mut self, r#machine: Machine) -> varlink::Result<()> {
self.reply_struct(Describe_Reply { r#machine }.into())
}
}
impl<'a> Call_Describe for varlink::Call<'a> {}
pub trait VarlinkInterface {
fn apply(&self, call: &mut dyn Call_Apply, r#machine: Machine) -> varlink::Result<()>;
fn describe(&self, call: &mut dyn Call_Describe) -> varlink::Result<()>;
fn call_upgraded(
&self,
_call: &mut varlink::Call,
_bufreader: &mut dyn BufRead,
) -> varlink::Result<Vec<u8>> {
Ok(Vec::new())
}
}
pub trait VarlinkClientInterface {
fn apply(&mut self, r#machine: Machine) -> varlink::MethodCall<Apply_Args, Apply_Reply, Error>;
fn describe(&mut self) -> varlink::MethodCall<Describe_Args, Describe_Reply, Error>;
}
#[allow(dead_code)]
pub struct VarlinkClient {
connection: Arc<RwLock<varlink::Connection>>,
}
impl VarlinkClient {
#[allow(dead_code)]
pub fn new(connection: Arc<RwLock<varlink::Connection>>) -> Self {
VarlinkClient { connection }
}
}
impl VarlinkClientInterface for VarlinkClient {
fn apply(&mut self, r#machine: Machine) -> varlink::MethodCall<Apply_Args, Apply_Reply, Error> {
varlink::MethodCall::<Apply_Args, Apply_Reply, Error>::new(
self.connection.clone(),
"io.patagia.Hostd.Apply",
Apply_Args { r#machine },
)
}
fn describe(&mut self) -> varlink::MethodCall<Describe_Args, Describe_Reply, Error> {
varlink::MethodCall::<Describe_Args, Describe_Reply, Error>::new(
self.connection.clone(),
"io.patagia.Hostd.Describe",
Describe_Args {},
)
}
}
#[allow(dead_code)]
pub struct VarlinkInterfaceProxy {
inner: Box<dyn VarlinkInterface + Send + Sync>,
}
#[allow(dead_code)]
pub fn new(inner: Box<dyn VarlinkInterface + Send + Sync>) -> VarlinkInterfaceProxy {
VarlinkInterfaceProxy { inner }
}
impl varlink::Interface for VarlinkInterfaceProxy {
fn get_description(&self) -> &'static str {
"interface io.patagia.Hostd\n\ntype Label (\n key: string,\n value: string\n)\n\ntype PatagiaAgentConfig (\n url: string,\n extraMounts: [string]string\n)\n\ntype Machine(\n machineId: string,\n nodeLabels: ?[]Label,\n patagiaAgent: ?PatagiaAgentConfig\n)\n\nmethod Describe() -> (\n machine: Machine\n)\n\nmethod Apply(\n machine: Machine\n) -> ()\n\nerror InvalidMachineConfig()\n"
}
fn get_name(&self) -> &'static str {
"io.patagia.Hostd"
}
fn call_upgraded(
&self,
call: &mut varlink::Call,
bufreader: &mut dyn BufRead,
) -> varlink::Result<Vec<u8>> {
self.inner.call_upgraded(call, bufreader)
}
fn call(&self, call: &mut varlink::Call) -> varlink::Result<()> {
let req = call.request.unwrap();
match req.method.as_ref() {
"io.patagia.Hostd.Apply" => {
if let Some(args) = req.parameters.clone() {
let args: Apply_Args = match serde_json::from_value(args) {
Ok(v) => v,
Err(e) => {
let es = format!("{}", e);
let _ = call.reply_invalid_parameter(es.clone());
return Err(varlink::context!(varlink::ErrorKind::SerdeJsonDe(es)));
}
};
self.inner
.apply(call as &mut dyn Call_Apply, args.r#machine)
} else {
call.reply_invalid_parameter("parameters".into())
}
}
"io.patagia.Hostd.Describe" => self.inner.describe(call as &mut dyn Call_Describe),
m => call.reply_method_not_found(String::from(m)),
}
}
}

hostd/src/io_systemd_hostname.rs (generated, new file, +295)

@@ -0,0 +1,295 @@
#![doc = "This file was automatically generated by the varlink rust generator"]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use serde_derive::{Deserialize, Serialize};
use std::io::BufRead;
use std::sync::{Arc, RwLock};
use varlink::{self, CallTrait};
#[allow(dead_code)]
#[derive(Clone, PartialEq, Debug)]
#[allow(clippy::enum_variant_names)]
pub enum ErrorKind {
Varlink_Error,
VarlinkReply_Error,
}
impl ::std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match self {
ErrorKind::Varlink_Error => write!(f, "Varlink Error"),
ErrorKind::VarlinkReply_Error => write!(f, "Varlink error reply"),
}
}
}
pub struct Error(
pub ErrorKind,
pub Option<Box<dyn std::error::Error + 'static + Send + Sync>>,
pub Option<&'static str>,
);
impl Error {
#[allow(dead_code)]
pub fn kind(&self) -> &ErrorKind {
&self.0
}
}
impl From<ErrorKind> for Error {
fn from(e: ErrorKind) -> Self {
Error(e, None, None)
}
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.1
.as_ref()
.map(|e| e.as_ref() as &(dyn std::error::Error + 'static))
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(&self.0, f)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use std::error::Error as StdError;
if let Some(ref o) = self.2 {
std::fmt::Display::fmt(o, f)?;
}
std::fmt::Debug::fmt(&self.0, f)?;
if let Some(e) = self.source() {
std::fmt::Display::fmt("\nCaused by:\n", f)?;
std::fmt::Debug::fmt(&e, f)?;
}
Ok(())
}
}
#[allow(dead_code)]
pub type Result<T> = std::result::Result<T, Error>;
impl From<varlink::Error> for Error {
fn from(e: varlink::Error) -> Self {
match e.kind() {
varlink::ErrorKind::VarlinkErrorReply(r) => Error(
ErrorKind::from(r),
Some(Box::from(e)),
Some(concat!(file!(), ":", line!(), ": ")),
),
_ => Error(
ErrorKind::Varlink_Error,
Some(Box::from(e)),
Some(concat!(file!(), ":", line!(), ": ")),
),
}
}
}
#[allow(dead_code)]
impl Error {
pub fn source_varlink_kind(&self) -> Option<&varlink::ErrorKind> {
use std::error::Error as StdError;
let mut s: &dyn StdError = self;
while let Some(c) = s.source() {
let k = self
.source()
.and_then(|e| e.downcast_ref::<varlink::Error>())
.map(|e| e.kind());
if k.is_some() {
return k;
}
s = c;
}
None
}
}
impl From<&varlink::Reply> for ErrorKind {
#[allow(unused_variables)]
fn from(e: &varlink::Reply) -> Self {
match e {
_ => ErrorKind::VarlinkReply_Error,
}
}
}
pub trait VarlinkCallError: varlink::CallTrait {}
impl<'a> VarlinkCallError for varlink::Call<'a> {}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct Describe_Reply {
pub r#Hostname: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#StaticHostname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#PrettyHostname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#DefaultHostname: Option<String>,
pub r#HostnameSource: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#IconName: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#Chassis: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#Deployment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#Location: Option<String>,
pub r#KernelName: String,
pub r#KernelRelease: String,
pub r#KernelVersion: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#OperatingSystemPrettyName: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#OperatingSystemCPEName: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#OperatingSystemHomeURL: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#OperatingSystemSupportEnd: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#OperatingSystemReleaseData: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#MachineInformationData: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#HardwareVendor: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#HardwareModel: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#HardwareSerial: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#FirmwareVersion: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#FirmwareVendor: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#FirmwareDate: Option<i64>,
pub r#MachineID: String,
pub r#BootID: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#ProductUUID: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub r#VSockCID: Option<i64>,
}
impl varlink::VarlinkReply for Describe_Reply {}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct Describe_Args {}
pub trait Call_Describe: VarlinkCallError {
fn reply(
&mut self,
r#Hostname: String,
r#StaticHostname: Option<String>,
r#PrettyHostname: Option<String>,
r#DefaultHostname: Option<String>,
r#HostnameSource: String,
r#IconName: Option<String>,
r#Chassis: Option<String>,
r#Deployment: Option<String>,
r#Location: Option<String>,
r#KernelName: String,
r#KernelRelease: String,
r#KernelVersion: String,
r#OperatingSystemPrettyName: Option<String>,
r#OperatingSystemCPEName: Option<String>,
r#OperatingSystemHomeURL: Option<String>,
r#OperatingSystemSupportEnd: Option<i64>,
r#OperatingSystemReleaseData: Option<Vec<String>>,
r#MachineInformationData: Option<Vec<String>>,
r#HardwareVendor: Option<String>,
r#HardwareModel: Option<String>,
r#HardwareSerial: Option<String>,
r#FirmwareVersion: Option<String>,
r#FirmwareVendor: Option<String>,
r#FirmwareDate: Option<i64>,
r#MachineID: String,
r#BootID: String,
r#ProductUUID: Option<String>,
r#VSockCID: Option<i64>,
) -> varlink::Result<()> {
self.reply_struct(
Describe_Reply {
r#Hostname,
r#StaticHostname,
r#PrettyHostname,
r#DefaultHostname,
r#HostnameSource,
r#IconName,
r#Chassis,
r#Deployment,
r#Location,
r#KernelName,
r#KernelRelease,
r#KernelVersion,
r#OperatingSystemPrettyName,
r#OperatingSystemCPEName,
r#OperatingSystemHomeURL,
r#OperatingSystemSupportEnd,
r#OperatingSystemReleaseData,
r#MachineInformationData,
r#HardwareVendor,
r#HardwareModel,
r#HardwareSerial,
r#FirmwareVersion,
r#FirmwareVendor,
r#FirmwareDate,
r#MachineID,
r#BootID,
r#ProductUUID,
r#VSockCID,
}
.into(),
)
}
}
impl<'a> Call_Describe for varlink::Call<'a> {}
pub trait VarlinkInterface {
fn describe(&self, call: &mut dyn Call_Describe) -> varlink::Result<()>;
fn call_upgraded(
&self,
_call: &mut varlink::Call,
_bufreader: &mut dyn BufRead,
) -> varlink::Result<Vec<u8>> {
Ok(Vec::new())
}
}
pub trait VarlinkClientInterface {
fn describe(&mut self) -> varlink::MethodCall<Describe_Args, Describe_Reply, Error>;
}
#[allow(dead_code)]
pub struct VarlinkClient {
connection: Arc<RwLock<varlink::Connection>>,
}
impl VarlinkClient {
#[allow(dead_code)]
pub fn new(connection: Arc<RwLock<varlink::Connection>>) -> Self {
VarlinkClient { connection }
}
}
impl VarlinkClientInterface for VarlinkClient {
fn describe(&mut self) -> varlink::MethodCall<Describe_Args, Describe_Reply, Error> {
varlink::MethodCall::<Describe_Args, Describe_Reply, Error>::new(
self.connection.clone(),
"io.systemd.Hostname.Describe",
Describe_Args {},
)
}
}
#[allow(dead_code)]
pub struct VarlinkInterfaceProxy {
inner: Box<dyn VarlinkInterface + Send + Sync>,
}
#[allow(dead_code)]
pub fn new(inner: Box<dyn VarlinkInterface + Send + Sync>) -> VarlinkInterfaceProxy {
VarlinkInterfaceProxy { inner }
}
impl varlink::Interface for VarlinkInterfaceProxy {
fn get_description(&self) -> &'static str {
"interface io.systemd.Hostname\n\nmethod Describe() -> (\n\tHostname: string,\n\tStaticHostname: ?string,\n\tPrettyHostname: ?string,\n\tDefaultHostname: ?string,\n\tHostnameSource: string,\n\tIconName: ?string,\n\tChassis: ?string,\n\tDeployment: ?string,\n\tLocation: ?string,\n\tKernelName: string,\n\tKernelRelease: string,\n\tKernelVersion: string,\n\tOperatingSystemPrettyName: ?string,\n\tOperatingSystemCPEName: ?string,\n\tOperatingSystemHomeURL: ?string,\n\tOperatingSystemSupportEnd: ?int,\n\tOperatingSystemReleaseData: ?[]string,\n\tMachineInformationData: ?[]string,\n\tHardwareVendor: ?string,\n\tHardwareModel: ?string,\n\tHardwareSerial: ?string,\n\tFirmwareVersion: ?string,\n\tFirmwareVendor: ?string,\n\tFirmwareDate: ?int,\n\tMachineID: string,\n\tBootID: string,\n\tProductUUID: ?string,\n\tVSockCID: ?int\n)\n"
}
fn get_name(&self) -> &'static str {
"io.systemd.Hostname"
}
fn call_upgraded(
&self,
call: &mut varlink::Call,
bufreader: &mut dyn BufRead,
) -> varlink::Result<Vec<u8>> {
self.inner.call_upgraded(call, bufreader)
}
fn call(&self, call: &mut varlink::Call) -> varlink::Result<()> {
let req = call.request.unwrap();
match req.method.as_ref() {
"io.systemd.Hostname.Describe" => self.inner.describe(call as &mut dyn Call_Describe),
m => call.reply_method_not_found(String::from(m)),
}
}
}

hostd/src/main.rs (new file, +3)

@@ -0,0 +1,3 @@
fn main() {
println!("Hello World!");
}

instrumentation crate (file name not shown)

@@ -96,20 +96,18 @@ pub fn init_tracing(otel_endpoint: Option<&String>, log_stderr: bool) -> Result<
None => None,
};
let metrics_layer = match meter_provider {
Some(ref p) => Some(MetricsLayer::new(p.to_owned())),
None => None,
};
let metrics_layer = meter_provider
.as_ref()
.map(|p| MetricsLayer::new(p.to_owned()));
let tracer_provider = match otel_endpoint {
Some(endpoint) => Some(init_tracer_provider(endpoint, resource)?),
None => None,
};
let trace_layer = match tracer_provider {
Some(ref p) => Some(OpenTelemetryLayer::new(p.tracer("tracing-otel-subscriber"))),
None => None,
};
let trace_layer = tracer_provider
.as_ref()
.map(|p| OpenTelemetryLayer::new(p.tracer("tracing-otel-subscriber")));
opentelemetry::global::set_text_map_propagator(TraceContextPropagator::new());

justfile

@@ -5,11 +5,13 @@ default:
@just --choose
# Run controller
[group('controller')]
run-controller $RUST_LOG="debug,h2=info,hyper_util=info,tower=info":
cargo run --package patagia-controller -- --log-stderr
# Run controller local development
dev-controller:
[group('controller')]
dev-controller: dev-controller-db-migrate
watchexec --clear --restart --stop-signal INT --debounce 300ms -- just run-controller
# Run agent
@@ -48,6 +50,10 @@ machete:
open-api:
cargo xtask open-api
# Update OpenAPI spec
gen-open-api:
cargo xtask open-api > api.json
# Run all tests
check: check-nix
@@ -56,7 +62,12 @@ check-nix:
nix flake check
# Run PostgreSQL for development and testing
[group('controller')]
dev-postgres:
#!/usr/bin/env sh
if podman ps --filter "name=patagia-postgres" --filter "status=running" -q | grep -q .; then
exit 0
fi
mkdir -p "${XDG_RUNTIME_DIR}/patagia-postgres"
podman volume exists patagia-postgres || podman volume create patagia-postgres
podman run \
@@ -69,12 +80,33 @@ dev-postgres:
--volume patagia-postgres:/var/lib/postgresql/data \
--volume "${XDG_RUNTIME_DIR}/patagia-postgres:/var/run/postgresql" \
docker.io/postgres:17
sleep 0.3
# Clean up PostgreSQL data
[group('controller')]
dev-postgres-clean:
podman rm -f patagia-postgres || true
podman volume rm patagia-postgres || true
# Connect to PostgreSQL with psql
[group('controller')]
dev-postgres-psql:
podman exec -it patagia-postgres psql -U patagia
[group('controller')]
[working-directory: 'controller']
dev-controller-db-migrate: dev-postgres
cargo sqlx migrate run
[group('controller')]
[working-directory: 'controller']
dev-controller-db-reset:
cargo sqlx db reset -y
[group('controller')]
[working-directory: 'controller']
gen-controller-sqlx-prepare:
cargo sqlx prepare
gen: gen-open-api gen-controller-sqlx-prepare fmt