From 5cba7dd2613d9644849ee7da9d28a206a7713867 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Fri, 8 May 2026 18:21:58 +0200 Subject: [PATCH 1/3] feat: add framework macros and validated type definitions Introduce the framework module with compile-time validated string wrapper types (attributed_string_type macro) and typed Kubernetes/operator name types. Ported from the opensearch-operator reference implementation. All code is currently unused and will be wired into the controller in subsequent commits. Co-Authored-By: Claude Opus 4.6 --- Cargo.lock | 208 +++- Cargo.toml | 2 + rust/operator-binary/Cargo.toml | 2 + rust/operator-binary/src/framework.rs | 44 + rust/operator-binary/src/framework/macros.rs | 2 + .../macros/attributed_string_type.rs | 927 ++++++++++++++++++ .../src/framework/macros/constant.rs | 17 + rust/operator-binary/src/framework/types.rs | 3 + .../src/framework/types/common.rs | 68 ++ .../src/framework/types/kubernetes.rs | 191 ++++ .../src/framework/types/operator.rs | 91 ++ rust/operator-binary/src/main.rs | 1 + 12 files changed, 1553 insertions(+), 3 deletions(-) create mode 100644 rust/operator-binary/src/framework.rs create mode 100644 rust/operator-binary/src/framework/macros.rs create mode 100644 rust/operator-binary/src/framework/macros/attributed_string_type.rs create mode 100644 rust/operator-binary/src/framework/macros/constant.rs create mode 100644 rust/operator-binary/src/framework/types.rs create mode 100644 rust/operator-binary/src/framework/types/common.rs create mode 100644 rust/operator-binary/src/framework/types/kubernetes.rs create mode 100644 rust/operator-binary/src/framework/types/operator.rs diff --git a/Cargo.lock b/Cargo.lock index 575558a9..7794d2e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -830,6 +830,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = 
"0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "foldhash" version = "0.2.0" @@ -977,10 +983,23 @@ checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", - "r-efi", + "r-efi 5.3.0", "wasip2", ] +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "wasip2", + "wasip3", +] + [[package]] name = "git2" version = "0.20.4" @@ -1042,6 +1061,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash 0.1.5", +] + [[package]] name = "hashbrown" version = "0.16.1" @@ -1050,7 +1078,7 @@ checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.2.0", ] [[package]] @@ -1317,6 +1345,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -1352,6 +1386,8 @@ checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" dependencies = [ "equivalent", "hashbrown 0.17.0", + "serde", + "serde_core", ] [[package]] @@ -1664,6 +1700,12 @@ dependencies = [ "spin", ] +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + 
[[package]] name = "libc" version = "0.2.185" @@ -2152,6 +2194,16 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + [[package]] name = "primeorder" version = "0.13.6" @@ -2233,6 +2285,12 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + [[package]] name = "rand" version = "0.8.6" @@ -2931,6 +2989,7 @@ dependencies = [ "indoc", "pin-project", "product-config", + "regex", "rstest", "semver", "serde", @@ -2941,6 +3000,7 @@ dependencies = [ "strum", "tokio", "tracing", + "uuid", ] [[package]] @@ -3652,6 +3712,17 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd74a9687298c6858e9b88ec8935ec45d22e8fd5e6394fa1bd4e99a87789c76" +dependencies = [ + "getrandom 0.4.2", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -3691,7 +3762,16 @@ version = "1.0.3+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20064672db26d7cdc89c7798c48a0fdfac8213434a1186e5ef29fd560ae223d6" dependencies = [ - "wit-bindgen", + "wit-bindgen 0.57.1", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen 0.51.0", ] [[package]] @@ -3749,6 +3829,40 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + [[package]] name = "web-sys" version = "0.3.95" @@ -3919,12 +4033,100 @@ dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + [[package]] name = "wit-bindgen" version = "0.57.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ebf944e87a7c253233ad6766e082e3cd714b5d03812acc24c318f549614536e" +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + [[package]] name = "writeable" version = "0.6.3" diff --git a/Cargo.toml b/Cargo.toml index ec6d3e67..aa4889f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ const_format = "0.2" fnv = "1.0" futures = { version = "0.3", features = ["compat"] } indoc = "2.0" +regex = "1.11" pin-project = "1.1" rstest = "0.26" semver = "1.0" @@ -30,6 +31,7 @@ snafu = "0.9" strum = { version = "0.28", features = ["derive"] } tokio = { version = "1.40", features = ["full"] } tracing = "0.1" +uuid = { version = "1.16", features = ["v4"] } [patch."https://github.com/stackabletech/operator-rs.git"] # stackable-operator = { path = "../operator-rs/crates/stackable-operator" } 
diff --git a/rust/operator-binary/Cargo.toml b/rust/operator-binary/Cargo.toml index a841e2b7..ef13bfce 100644 --- a/rust/operator-binary/Cargo.toml +++ b/rust/operator-binary/Cargo.toml @@ -18,6 +18,7 @@ const_format.workspace = true fnv.workspace = true futures.workspace = true indoc.workspace = true +regex.workspace = true pin-project.workspace = true semver.workspace = true serde.workspace = true @@ -26,6 +27,7 @@ snafu.workspace = true strum.workspace = true tokio.workspace = true tracing.workspace = true +uuid.workspace = true [dev-dependencies] rstest.workspace = true diff --git a/rust/operator-binary/src/framework.rs b/rust/operator-binary/src/framework.rs new file mode 100644 index 00000000..8388e156 --- /dev/null +++ b/rust/operator-binary/src/framework.rs @@ -0,0 +1,44 @@ +//! Additions to stackable-operator +//! +//! Functions in stackable-operator usually accept generic types like strings and validate the +//! parameters as late as possible. Therefore, nearly all functions have to return a [`Result`] and +//! errors are returned along the call chain. That makes error handling complex because every +//! module re-packages the received error. Also, the validation is repeated if the value is used in +//! different function calls. Sometimes, validation is not necessary if constant values are used, +//! e.g. the name of the operator. +//! +//! This operator uses a different approach. The incoming values are validated as early as possible +//! and wrapped in a fail-safe type. This type is then used along the call chain, validation is not +//! necessary anymore and functions without side effects do not need to return a [`Result`]. +//! +//! However, this operator uses stackable-operator and at the interface, the fail-safe types must +//! be unwrapped and the [`Result`] returned by the stackable-operator function must be handled. +//! This is done by calling [`Result::expect`] which requires thorough testing. +//! +//! 
When the development of this module has progressed and changes become less frequent, then this +//! module can be incorporated into stackable-operator. The module structure should already +//! resemble the one of stackable-operator. + +use types::kubernetes::Uid; + +pub mod macros; +pub mod types; + +/// Has a non-empty name +/// +/// Useful as an object reference; Should not be used to create an object because the name could +/// violate the naming constraints (e.g. maximum length) of the object. +pub trait HasName { + #[allow(dead_code)] + fn to_name(&self) -> String; +} + +/// Has a Kubernetes UID +pub trait HasUid { + fn to_uid(&self) -> Uid; +} + +/// The name is a valid label value +pub trait NameIsValidLabelValue { + fn to_label_value(&self) -> String; +} diff --git a/rust/operator-binary/src/framework/macros.rs b/rust/operator-binary/src/framework/macros.rs new file mode 100644 index 00000000..c25def95 --- /dev/null +++ b/rust/operator-binary/src/framework/macros.rs @@ -0,0 +1,2 @@ +pub mod attributed_string_type; +pub mod constant; diff --git a/rust/operator-binary/src/framework/macros/attributed_string_type.rs b/rust/operator-binary/src/framework/macros/attributed_string_type.rs new file mode 100644 index 00000000..84b88515 --- /dev/null +++ b/rust/operator-binary/src/framework/macros/attributed_string_type.rs @@ -0,0 +1,927 @@ +use snafu::Snafu; +use strum::{EnumDiscriminants, IntoStaticStr}; + +/// Maximum length of label values +/// +/// Duplicates the private constant [`stackable_operator::kvp::LABEL_VALUE_MAX_LEN`] +pub const MAX_LABEL_VALUE_LENGTH: usize = 63; + +#[derive(Debug, EnumDiscriminants, Snafu)] +#[snafu(visibility(pub))] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("minimum length not met"))] + MinimumLengthNotMet { length: usize, min_length: usize }, + + #[snafu(display("maximum length exceeded"))] + LengthExceeded { length: usize, max_length: usize }, + + #[snafu(display("invalid regular 
expression"))] + InvalidRegex { source: regex::Error }, + + #[snafu(display("regular expression not matched"))] + RegexNotMatched { value: String, regex: &'static str }, + + #[snafu(display("not a valid label value"))] + InvalidLabelValue { + source: stackable_operator::kvp::LabelValueError, + }, + + #[snafu(display("not a valid label name as defined in RFC 1035"))] + InvalidRfc1035LabelName { + source: stackable_operator::validation::Errors, + }, + + #[snafu(display("not a valid DNS subdomain name as defined in RFC 1123"))] + InvalidRfc1123DnsSubdomainName { + source: stackable_operator::validation::Errors, + }, + + #[snafu(display("not a valid label name as defined in RFC 1123"))] + InvalidRfc1123LabelName { + source: stackable_operator::validation::Errors, + }, + + #[snafu(display("not a valid UUID"))] + InvalidUid { source: uuid::Error }, +} + +/// Helper data type to determine combined regular expressions +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Regex { + /// There is a regular expression but it is unknown (because it was too complicated to + /// calculate it). + Unknown, + + /// `MatchAll` equals `Expression(".*")`, but `MatchAll` can be pattern matched in a const + /// context, whereas `Expression(...)` cannot. + MatchAll, + + /// A regular expression + Expression(&'static str), +} + +impl Regex { + /// Combine this regular expression with the given one. + pub const fn combine(self, other: Regex) -> Regex { + match (self, other) { + (_, Regex::MatchAll) => self, + (Regex::MatchAll, _) => other, + // It is hard to combine two regular expressions and nearly impossible to do this in a + // const context. Fortunately, for most of the data types, only one regular expression + // is set. + _ => Regex::Unknown, + } + } +} + +/// Restricted string type with attributes like maximum length. +/// +/// Fully-qualified types are used to ease the import into other modules. +/// +/// # Examples +/// +/// ```rust +/// attributed_string_type! 
{ +/// ConfigMapName, +/// "The name of a ConfigMap", +/// "hive-metastore-default", +/// is_rfc_1123_dns_subdomain_name +/// } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! attributed_string_type { + ($name:ident, $description:literal, $example:literal $(, $attribute:tt)*) => { + #[doc = std::concat!($description, ", e.g. \"", $example, "\"")] + #[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] + pub struct $name(String); + + impl $name { + /// The minimum length + pub const MIN_LENGTH: usize = attributed_string_type!(@min_length $($attribute)*); + + /// The maximum length + pub const MAX_LENGTH: usize = attributed_string_type!(@max_length $($attribute)*); + + /// The regular expression + /// + /// This field is not meant to be used outside of this macro. + pub const REGEX: $crate::framework::macros::attributed_string_type::Regex = attributed_string_type!(@regex $($attribute)*); + } + + impl stackable_operator::config::merge::Atomic for $name {} + + impl std::fmt::Display for $name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } + } + + impl From<$name> for String { + fn from(value: $name) -> Self { + value.0 + } + } + + impl From<&$name> for String { + fn from(value: &$name) -> Self { + value.0.clone() + } + } + + impl AsRef for $name { + fn as_ref(&self) -> &str { + &self.0 + } + } + + impl std::str::FromStr for $name { + type Err = $crate::framework::macros::attributed_string_type::Error; + + fn from_str(s: &str) -> std::result::Result { + // ResultExt::context is used on most but not all usages of this macro + #[allow(unused_imports)] + use snafu::ResultExt; + + $(attributed_string_type!(@from_str $name, s, $attribute);)* + + Ok(Self(s.to_owned())) + } + } + + impl<'de> serde::Deserialize<'de> for $name { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let string: String = serde::Deserialize::deserialize(deserializer)?; + 
$name::from_str(&string).map_err(|err| serde::de::Error::custom(&err)) + } + } + + impl serde::Serialize for $name { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0.serialize(serializer) + } + } + + // The JsonSchema implementation requires `max_length`. + impl stackable_operator::schemars::JsonSchema for $name { + fn schema_name() -> std::borrow::Cow<'static, str> { + std::stringify!($name).into() + } + + fn json_schema(_generator: &mut stackable_operator::schemars::generate::SchemaGenerator) -> stackable_operator::schemars::Schema { + stackable_operator::schemars::json_schema!({ + "type": "string", + "minLength": $name::MIN_LENGTH, + "maxLength": if $name::MAX_LENGTH != usize::MAX { + Some($name::MAX_LENGTH) + } else { + // Do not set maxLength if it is usize::MAX. + None + }, + "pattern": match $name::REGEX { + $crate::framework::macros::attributed_string_type::Regex::Expression(regex) => Some(regex), + _ => None + } + }) + } + } + + impl $name { + /// Converts a string to this type, panicking if the string is invalid. + /// + /// Only use this for compile-time constants or pre-validated values. + pub fn from_str_unsafe(s: &str) -> Self { + std::str::FromStr::from_str(s).expect("should be a valid {name}") + } + } + + #[cfg(test)] + impl $name { + // A dead_code warning is emitted if there is no unit test that calls this function. 
+ pub fn test_example() { + Self::from_str_unsafe($example); + } + } + + $(attributed_string_type!(@trait_impl $name, $attribute);)* + }; + + // std::str::FromStr + + (@from_str $name:ident, $s:expr, (min_length = $min_length:expr)) => { + let length = $s.len() as usize; + snafu::ensure!( + length >= $name::MIN_LENGTH, + $crate::framework::macros::attributed_string_type::MinimumLengthNotMetSnafu { + length, + min_length: $name::MIN_LENGTH, + } + ); + }; + (@from_str $name:ident, $s:expr, (max_length = $max_length:expr)) => { + let length = $s.len() as usize; + snafu::ensure!( + length <= $name::MAX_LENGTH, + $crate::framework::macros::attributed_string_type::LengthExceededSnafu { + length, + max_length: $name::MAX_LENGTH, + } + ); + }; + (@from_str $name:ident, $s:expr, (regex = $regex:expr)) => { + let regex = regex::Regex::new($regex).context($crate::framework::macros::attributed_string_type::InvalidRegexSnafu)?; + snafu::ensure!( + regex.is_match($s), + $crate::framework::macros::attributed_string_type::RegexNotMatchedSnafu { + value: $s, + regex: $regex + } + ); + }; + (@from_str $name:ident, $s:expr, is_rfc_1035_label_name) => { + stackable_operator::validation::is_lowercase_rfc_1035_label($s).context($crate::framework::macros::attributed_string_type::InvalidRfc1035LabelNameSnafu)?; + }; + (@from_str $name:ident, $s:expr, is_rfc_1123_dns_subdomain_name) => { + stackable_operator::validation::is_lowercase_rfc_1123_subdomain($s).context($crate::framework::macros::attributed_string_type::InvalidRfc1123DnsSubdomainNameSnafu)?; + }; + (@from_str $name:ident, $s:expr, is_rfc_1123_label_name) => { + stackable_operator::validation::is_lowercase_rfc_1123_label($s).context($crate::framework::macros::attributed_string_type::InvalidRfc1123LabelNameSnafu)?; + }; + (@from_str $name:ident, $s:expr, is_valid_label_value) => { + stackable_operator::kvp::LabelValue::from_str($s).context($crate::framework::macros::attributed_string_type::InvalidLabelValueSnafu)?; + }; + 
(@from_str $name:ident, $s:expr, is_uid) => { + uuid::Uuid::try_parse($s).context($crate::framework::macros::attributed_string_type::InvalidUidSnafu)?; + }; + + // MIN_LENGTH + + (@min_length) => { + // The minimum String length is 0. + 0 + }; + (@min_length (min_length = $min_length:expr) $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + $min_length, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length (max_length = $max_length:expr) $($attribute:tt)*) => { + // max_length has no opinion on the min_length. + attributed_string_type!(@min_length $($attribute)*) + }; + (@min_length (regex = $regex:expr) $($attribute:tt)*) => { + // regex has no influence on the min_length. + attributed_string_type!(@min_length $($attribute)*) + }; + (@min_length is_rfc_1035_label_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + 1, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length is_rfc_1123_dns_subdomain_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + 1, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length is_rfc_1123_label_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + 1, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length is_valid_label_value $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + 1, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + (@min_length is_uid $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::max( + uuid::fmt::Hyphenated::LENGTH, + attributed_string_type!(@min_length $($attribute)*) + ) + }; + + // MAX_LENGTH + + (@max_length) => { + // If there is no other max_length defined, then the upper bound is usize::MAX. 
+ usize::MAX + }; + (@max_length (min_length = $min_length:expr) $($attribute:tt)*) => { + // min_length has no opinion on the max_length. + attributed_string_type!(@max_length $($attribute)*) + }; + (@max_length (max_length = $max_length:expr) $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + $max_length, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length (regex = $regex:expr) $($attribute:tt)*) => { + // regex has no influence on the max_length. + attributed_string_type!(@max_length $($attribute)*) + }; + (@max_length is_rfc_1035_label_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + stackable_operator::validation::RFC_1035_LABEL_MAX_LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length is_rfc_1123_dns_subdomain_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + stackable_operator::validation::RFC_1123_SUBDOMAIN_MAX_LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length is_rfc_1123_label_name $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + stackable_operator::validation::RFC_1123_LABEL_MAX_LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length is_valid_label_value $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + $crate::framework::macros::attributed_string_type::MAX_LABEL_VALUE_LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + (@max_length is_uid $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::min( + uuid::fmt::Hyphenated::LENGTH, + attributed_string_type!(@max_length $($attribute)*) + ) + }; + + // REGEX + + (@regex) => { + // Everything is allowed if there is no other regular expression. 
+ $crate::framework::macros::attributed_string_type::Regex::MatchAll + }; + (@regex (min_length = $min_length:expr) $($attribute:tt)*) => { + // min_length has no influence on the regular expression. + attributed_string_type!(@regex $($attribute)*) + }; + (@regex (max_length = $max_length:expr) $($attribute:tt)*) => { + // max_length has no influence on the regular expression. + attributed_string_type!(@regex $($attribute)*) + }; + (@regex (regex = $regex:expr) $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::Regex::Expression($regex) + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_rfc_1035_label_name $($attribute:tt)*) => { + // see https://github.com/kubernetes/kubernetes/blob/v1.35.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L228 + $crate::framework::macros::attributed_string_type::Regex::Expression("^[a-z]([-a-z0-9]*[a-z0-9])?$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_rfc_1123_dns_subdomain_name $($attribute:tt)*) => { + // see https://github.com/kubernetes/kubernetes/blob/v1.35.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L193 + $crate::framework::macros::attributed_string_type::Regex::Expression("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_rfc_1123_label_name $($attribute:tt)*) => { + // see https://github.com/kubernetes/kubernetes/blob/v1.35.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L163 + $crate::framework::macros::attributed_string_type::Regex::Expression("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_valid_label_value $($attribute:tt)*) => { + // regular expression from stackable_operator::kvp::label::LABEL_VALUE_REGEX + 
$crate::framework::macros::attributed_string_type::Regex::Expression("^[a-z0-9A-Z]([a-z0-9A-Z-_.]*[a-z0-9A-Z]+)?$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + (@regex is_uid $($attribute:tt)*) => { + $crate::framework::macros::attributed_string_type::Regex::Expression("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") + .combine(attributed_string_type!(@regex $($attribute)*)) + }; + + // additional constants and trait implementations + + (@trait_impl $name:ident, (min_length = $max_length:expr)) => { + }; + (@trait_impl $name:ident, (max_length = $max_length:expr)) => { + }; + (@trait_impl $name:ident, (regex = $regex:expr)) => { + }; + (@trait_impl $name:ident, is_rfc_1035_label_name) => { + impl $name { + pub const IS_RFC_1035_LABEL_NAME: bool = true; + pub const IS_RFC_1123_LABEL_NAME: bool = true; + pub const IS_RFC_1123_SUBDOMAIN_NAME: bool = true; + } + }; + (@trait_impl $name:ident, is_rfc_1123_dns_subdomain_name) => { + impl $name { + pub const IS_RFC_1123_SUBDOMAIN_NAME: bool = true; + } + }; + (@trait_impl $name:ident, is_rfc_1123_label_name) => { + impl $name { + pub const IS_RFC_1123_LABEL_NAME: bool = true; + pub const IS_RFC_1123_SUBDOMAIN_NAME: bool = true; + } + }; + (@trait_impl $name:ident, is_valid_label_value) => { + impl $name { + pub const IS_VALID_LABEL_VALUE: bool = true; + } + + impl $crate::framework::NameIsValidLabelValue for $name { + fn to_label_value(&self) -> String { + self.0.clone() + } + } + }; + (@trait_impl $name:ident, is_uid) => { + impl From for $name { + fn from(value: uuid::Uuid) -> Self { + Self(value.to_string()) + } + } + + impl From<&uuid::Uuid> for $name { + fn from(value: &uuid::Uuid) -> Self { + Self(value.to_string()) + } + } + }; +} + +/// Returns the minimum of the given values. +/// +/// As opposed to [`std::cmp::min`], this function can be used at compile-time. 
+/// +/// # Examples +/// +/// ```rust +/// assert_eq!(2, min(2, 3)); +/// assert_eq!(4, min(5, 4)); +/// assert_eq!(1, min(1, 1)); +/// ``` +pub const fn min(x: usize, y: usize) -> usize { + if x < y { x } else { y } +} + +/// Returns the maximum of the given values. +/// +/// As opposed to [`std::cmp::max`], this function can be used at compile-time. +/// +/// # Examples +/// +/// ```rust +/// assert_eq!(3, max(2, 3)); +/// assert_eq!(5, max(5, 4)); +/// assert_eq!(1, max(1, 1)); +/// ``` +pub const fn max(x: usize, y: usize) -> usize { + if x < y { y } else { x } +} + +#[cfg(test)] +// `InvalidRegexTest` intentionally contains an invalid regular expression. +#[allow(clippy::invalid_regex)] +mod tests { + use std::str::FromStr; + + use serde_json::{Number, Value, json}; + use stackable_operator::schemars::{JsonSchema, SchemaGenerator}; + use uuid::uuid; + + use super::{ErrorDiscriminants, Regex}; + use crate::framework::NameIsValidLabelValue; + + attributed_string_type! { + MinLengthWithoutConstraintsTest, + "min_length test without constraints", + "" + } + + #[test] + fn test_attributed_string_type_min_length_without_constraints() { + type T = MinLengthWithoutConstraintsTest; + + T::test_example(); + assert_eq!(0, T::MIN_LENGTH); + } + + attributed_string_type! 
{ + MinLengthWithConstraintsTest, + "min_length test with constraints", + "test", + (min_length = 2), // should set the minimum length to 2 + (max_length = 8), // should not affect the minimum length + (regex = "^.{4}$"), // should not affect the minimum length + is_rfc_1035_label_name, // should be overruled by the greater min_length + is_valid_label_value // should be overruled by the greater min_length + } + + #[test] + fn test_attributed_string_type_min_length_with_constraints() { + type T = MinLengthWithConstraintsTest; + + T::test_example(); + assert_eq!(2, T::MIN_LENGTH); + assert_eq!( + Err(ErrorDiscriminants::MinimumLengthNotMet), + T::from_str("a").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + MaxLengthWithoutConstraintsTest, + "max_length test without constraints", + "" + } + + #[test] + fn test_attributed_string_type_max_length_without_constraints() { + type T = MaxLengthWithoutConstraintsTest; + + T::test_example(); + assert_eq!(usize::MAX, T::MAX_LENGTH); + } + + attributed_string_type! { + MaxLengthWithConstraintsTest, + "max_length test with constraints", + "test", + (min_length = 2), // should not affect the maximum length + (max_length = 8), // should set the maximum length to 8 + (regex = "^.{4}$"), // should not affect the maximum length + is_rfc_1035_label_name, // should be overruled by the lower max_length + is_valid_label_value // should be overruled by the lower max_length + } + + #[test] + fn test_attributed_string_type_max_length_with_constraints() { + type T = MaxLengthWithConstraintsTest; + + T::test_example(); + assert_eq!(8, T::MAX_LENGTH); + assert_eq!( + Err(ErrorDiscriminants::LengthExceeded), + T::from_str("test-12345").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! 
{ + RegexWithoutConstraintsTest, + "regex test without constraints", + "" + } + + #[test] + fn test_attributed_string_type_regex_without_constraints() { + type T = RegexWithoutConstraintsTest; + + T::test_example(); + assert_eq!(Regex::MatchAll, T::REGEX); + } + + attributed_string_type! { + RegexWithOneConstraintTest, + "regex test with one constraint", + "test", + (min_length = 2), // should not affect the regular expression + (max_length = 8), // should not affect the regular expression + (regex = "^[est]{4}$") // should set the regular expression to "[est]{4}" + } + + #[test] + fn test_attributed_string_type_regex_with_one_constraint() { + type T = RegexWithOneConstraintTest; + + T::test_example(); + assert_eq!(Regex::Expression("^[est]{4}$"), T::REGEX); + assert_eq!( + Err(ErrorDiscriminants::RegexNotMatched), + T::from_str("t-st").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + RegexWithMultipleConstraintsTest, + "regex test with multiple constraints", + "test", + (min_length = 2), // should not affect the regular expression + (max_length = 8), // should not affect the regular expression + (regex = "^[est]{4}$"), // should not be combinable with is_rfc_1123_dns_subdomain_name + is_rfc_1123_dns_subdomain_name // should not be combinable with regex + } + + #[test] + fn test_attributed_string_type_regex_with_multiple_constraints() { + type T = RegexWithMultipleConstraintsTest; + + T::test_example(); + assert_eq!(Regex::Unknown, T::REGEX); + assert_eq!( + Err(ErrorDiscriminants::RegexNotMatched), + T::from_str("t-st").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! 
{ + InvalidRegexTest, + "regex test with invalid expression", + "test", + (min_length = 2), // should not affect the regular expression + (max_length = 8), // should not affect the regular expression + (regex = "{") // should throw an error at runtime + } + + #[test] + fn test_attributed_string_type_regex_with_invalid_expression() { + type T = InvalidRegexTest; + + // It is not known yet at compile-time that this expression is invalid. + assert_eq!(Regex::Expression("{"), T::REGEX); + assert_eq!( + Err(ErrorDiscriminants::InvalidRegex), + T::from_str("test").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + DisplayFmtTest, + "Display::fmt test", + "test" + } + + #[test] + fn test_attributed_string_type_display_fmt() { + type T = DisplayFmtTest; + + assert_eq!("test", format!("{}", T::from_str_unsafe("test"))); + } + + attributed_string_type! { + StringFromTest, + "String::from test", + "test" + } + + #[test] + fn test_attributed_string_type_string_from() { + type T = StringFromTest; + + T::test_example(); + assert_eq!("test", String::from(T::from_str_unsafe("test"))); + assert_eq!("test", String::from(&T::from_str_unsafe("test"))); + } + + attributed_string_type! 
{ + DeserializeTest, + "serde::Deserialize test", + "test", + (min_length = 2), + (max_length = 4), + (regex = "^[est-]+$"), + is_rfc_1035_label_name + } + + #[test] + fn test_attributed_string_type_deserialize() { + type T = DeserializeTest; + + T::test_example(); + assert_eq!( + T::from_str_unsafe("test"), + serde_json::from_value(Value::String("test".to_owned())) + .expect("should be deserializable") + ); + assert_eq!( + Err("minimum length not met".to_owned()), + serde_json::from_value::<T>(Value::String("e".to_owned())) + .map_err(|err| err.to_string()) + ); + assert_eq!( + Err("maximum length exceeded".to_owned()), + serde_json::from_value::<T>(Value::String("testt".to_owned())) + .map_err(|err| err.to_string()) + ); + assert_eq!( + Err("regular expression not matched".to_owned()), + serde_json::from_value::<T>(Value::String("abc".to_owned())) + .map_err(|err| err.to_string()) + ); + assert_eq!( + Err("not a valid label name as defined in RFC 1035".to_owned()), + serde_json::from_value::<T>(Value::String("-tst".to_owned())) + .map_err(|err| err.to_string()) + ); + assert_eq!( + Err("invalid type: null, expected a string".to_owned()), + serde_json::from_value::<T>(Value::Null).map_err(|err| err.to_string()) + ); + assert_eq!( + Err("invalid type: boolean `true`, expected a string".to_owned()), + serde_json::from_value::<T>(Value::Bool(true)).map_err(|err| err.to_string()) + ); + assert_eq!( + Err("invalid type: integer `1`, expected a string".to_owned()), + serde_json::from_value::<T>(Value::Number( + Number::from_i128(1).expect("should be a valid number") + )) + .map_err(|err| err.to_string()) + ); + assert_eq!( + Err("invalid type: sequence, expected a string".to_owned()), + serde_json::from_value::<T>(Value::Array(vec![])).map_err(|err| err.to_string()) + ); + assert_eq!( + Err("invalid type: map, expected a string".to_owned()), + serde_json::from_value::<T>(Value::Object(serde_json::Map::new())) + .map_err(|err| err.to_string()) + ); + } + + attributed_string_type!
{ + SerializeTest, + "serde::Serialize test", + "test" + } + + #[test] + fn test_attributed_string_type_serialize() { + type T = SerializeTest; + + T::test_example(); + assert_eq!( + "\"test\"".to_owned(), + serde_json::to_string(&T::from_str_unsafe("test")).expect("should be serializable") + ); + } + + attributed_string_type! { + JsonSchemaWithoutConstraintsTest, + "JsonSchema test without constraints", + "test" + } + + #[test] + fn test_attributed_string_type_json_schema_without_constraints() { + type T = JsonSchemaWithoutConstraintsTest; + + T::test_example(); + assert_eq!("JsonSchemaWithoutConstraintsTest", T::schema_name()); + assert_eq!( + json!({ + "type": "string", + "minLength": 0, + "maxLength": None::<usize>, + "pattern": None::<String> + }), + T::json_schema(&mut SchemaGenerator::default()) + ); + } + + attributed_string_type! { + JsonSchemaWithConstraintsTest, + "JsonSchema test with constraints", + "test", + (min_length = 4), + (max_length = 8), + (regex = "^[est]+$") + } + + #[test] + fn test_attributed_string_type_json_schema_with_constraints() { + type T = JsonSchemaWithConstraintsTest; + + T::test_example(); + assert_eq!("JsonSchemaWithConstraintsTest", T::schema_name()); + assert_eq!( + json!({ + "type": "string", + "minLength": 4, + "maxLength": 8, + "pattern": "^[est]+$" + }), + T::json_schema(&mut SchemaGenerator::default()) + ); + } + + attributed_string_type! { + IsRfc1035LabelNameTest, + "is_rfc_1035_label_name test", + "a-b", + is_rfc_1035_label_name + } + + #[test] + fn test_attributed_string_type_is_rfc_1035_label_name() { + type T = IsRfc1035LabelNameTest; + + let _ = T::IS_RFC_1035_LABEL_NAME; + let _ = T::IS_RFC_1123_LABEL_NAME; + let _ = T::IS_RFC_1123_SUBDOMAIN_NAME; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidRfc1035LabelName), + T::from_str("A").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type!
{ + IsRfc1123DnsSubdomainNameTest, + "is_rfc_1123_dns_subdomain_name test", + "a-b.c", + is_rfc_1123_dns_subdomain_name + } + + #[test] + fn test_attributed_string_type_is_rfc_1123_dns_subdomain_name() { + type T = IsRfc1123DnsSubdomainNameTest; + + let _ = T::IS_RFC_1123_SUBDOMAIN_NAME; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidRfc1123DnsSubdomainName), + T::from_str("A").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + IsRfc1123LabelNameTest, + "is_rfc_1123_label_name test", + "1-a", + is_rfc_1123_label_name + } + + #[test] + fn test_attributed_string_type_is_rfc_1123_label_name() { + type T = IsRfc1123LabelNameTest; + + let _ = T::IS_RFC_1123_LABEL_NAME; + let _ = T::IS_RFC_1123_SUBDOMAIN_NAME; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidRfc1123LabelName), + T::from_str("A").map_err(ErrorDiscriminants::from) + ); + } + + attributed_string_type! { + IsValidLabelValueTest, + "is_valid_label_value test", + "a-_.1", + is_valid_label_value + } + + #[test] + fn test_attributed_string_type_is_valid_label_value() { + type T = IsValidLabelValueTest; + + let _ = T::IS_VALID_LABEL_VALUE; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidLabelValue), + T::from_str("invalid label value").map_err(ErrorDiscriminants::from) + ); + assert_eq!( + "label-value", + T::from_str_unsafe("label-value").to_label_value() + ); + } + + attributed_string_type! 
{ + IsUidTest, + "is_uid test", + "c27b3971-ca72-42c1-80a4-abdfc1db0ddd", + is_uid + } + + #[test] + fn test_attributed_string_type_is_uid() { + type T = IsUidTest; + + T::test_example(); + assert_eq!( + Err(ErrorDiscriminants::InvalidUid), + T::from_str("invalid UID").map_err(ErrorDiscriminants::from) + ); + assert_eq!( + "c27b3971-ca72-42c1-80a4-abdfc1db0ddd", + T::from(uuid!("c27b3971-ca72-42c1-80a4-abdfc1db0ddd")).to_string() + ); + assert_eq!( + "c27b3971-ca72-42c1-80a4-abdfc1db0ddd", + T::from(&uuid!("c27b3971-ca72-42c1-80a4-abdfc1db0ddd")).to_string() + ); + } +} diff --git a/rust/operator-binary/src/framework/macros/constant.rs b/rust/operator-binary/src/framework/macros/constant.rs new file mode 100644 index 00000000..ae4e9c69 --- /dev/null +++ b/rust/operator-binary/src/framework/macros/constant.rs @@ -0,0 +1,17 @@ +/// Use [`std::sync::LazyLock`] to define a static "constant" from a string. +/// +/// The string is converted into the given type with [`std::str::FromStr::from_str`]. +/// +/// # Examples +/// +/// ```rust +/// constant!(DATA_VOLUME_NAME: VolumeName = "data"); +/// constant!(pub CONFIG_VOLUME_NAME: VolumeName = "config"); +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! constant { + ($qualifier:vis $name:ident: $type:ident = $value:literal) => { + $qualifier static $name: std::sync::LazyLock<$type> = + std::sync::LazyLock::new(|| $type::from_str($value).expect(std::concat!("should be a valid ", std::stringify!($name)))); + }; +} diff --git a/rust/operator-binary/src/framework/types.rs b/rust/operator-binary/src/framework/types.rs new file mode 100644 index 00000000..65f61166 --- /dev/null +++ b/rust/operator-binary/src/framework/types.rs @@ -0,0 +1,3 @@ +pub mod common; +pub mod kubernetes; +pub mod operator; diff --git a/rust/operator-binary/src/framework/types/common.rs b/rust/operator-binary/src/framework/types/common.rs new file mode 100644 index 00000000..3d7326ef --- /dev/null +++ b/rust/operator-binary/src/framework/types/common.rs @@ -0,0 +1,68 @@ +//!
Common types that do not belong (yet) to a more specific module +use snafu::{ResultExt, Snafu}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to convert to port number"))] + ConvertToPortNumber { source: std::num::TryFromIntError }, +} + +/// A port number +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Port(pub u16); + +impl std::fmt::Display for Port { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl From<u16> for Port { + fn from(value: u16) -> Self { + Port(value) + } +} + +impl From<Port> for i32 { + fn from(value: Port) -> Self { + value.0 as i32 + } +} + +impl TryFrom<i32> for Port { + type Error = Error; + + fn try_from(value: i32) -> Result<Self, Self::Error> { + Ok(Port( + u16::try_from(value).context(ConvertToPortNumberSnafu)?, + )) + } +} + +#[cfg(test)] +mod tests { + + use super::{ErrorDiscriminants, Port}; + + #[test] + fn test_port_fmt() { + assert_eq!("0".to_owned(), Port(0).to_string()); + assert_eq!("65535".to_owned(), Port(65535).to_string()); + } + + #[test] + fn test_port_try_from_i32() { + assert_eq!(Some(Port(0)), Port::try_from(0).ok()); + assert_eq!(Some(Port(65535)), Port::try_from(65535).ok()); + assert_eq!( + Err(ErrorDiscriminants::ConvertToPortNumber), + Port::try_from(-1).map_err(ErrorDiscriminants::from) + ); + assert_eq!( + Err(ErrorDiscriminants::ConvertToPortNumber), + Port::try_from(65536).map_err(ErrorDiscriminants::from) + ); + } +} diff --git a/rust/operator-binary/src/framework/types/kubernetes.rs b/rust/operator-binary/src/framework/types/kubernetes.rs new file mode 100644 index 00000000..3bd94640 --- /dev/null +++ b/rust/operator-binary/src/framework/types/kubernetes.rs @@ -0,0 +1,191 @@ +//!
Kubernetes (resource) names +use std::str::FromStr; + +use stackable_operator::validation::{RFC_1123_LABEL_MAX_LENGTH, RFC_1123_SUBDOMAIN_MAX_LENGTH}; + +use crate::attributed_string_type; + +attributed_string_type! { + ConfigMapName, + "The name of a ConfigMap", + "hive-metastore-default", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + ConfigMapKey, + "The key for a ConfigMap", + "hive-site.xml", + (min_length = 1), + // see https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L435-L451 + (max_length = RFC_1123_SUBDOMAIN_MAX_LENGTH), + (regex = "^[-._a-zA-Z0-9]+$") +} + +attributed_string_type! { + ContainerName, + "The name of a container in a Pod", + "hive", + is_rfc_1123_label_name +} + +attributed_string_type! { + ClusterRoleName, + "The name of a ClusterRole", + "hive-clusterrole", + // On the one hand, ClusterRoles must only contain characters that are allowed for DNS + // subdomain names, on the other hand, their length does not seem to be restricted – at least + // on Kind. However, 253 characters are sufficient for the Stackable operators, and to avoid + // problems on other Kubernetes providers, the length is restricted here. + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + Hostname, + "A hostname", + "example.com", + (min_length = 1), + (max_length = 253), + // see https://en.wikipedia.org/wiki/Hostname#Syntax + (regex = "^[a-zA-Z0-9]([-a-zA-Z0-9]{0,60}[a-zA-Z0-9])?(\\.[a-zA-Z0-9]([-a-zA-Z0-9]{0,60}[a-zA-Z0-9])?)*\\.?$") +} + +attributed_string_type! { + ListenerName, + "The name of a Listener", + "hive-metastore-default", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + ListenerClassName, + "The name of a ListenerClass", + "external-stable", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! 
{ + NamespaceName, + "The name of a Namespace", + "stackable-operators", + is_rfc_1123_label_name, + is_valid_label_value +} + +attributed_string_type! { + PersistentVolumeClaimName, + "The name of a PersistentVolumeClaim", + "config", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + RoleBindingName, + "The name of a RoleBinding", + "hive-rolebinding", + // On the one hand, RoleBindings must only contain characters that are allowed for DNS + // subdomain names, on the other hand, their length does not seem to be restricted – at least + // on Kind. However, 253 characters are sufficient for the Stackable operators, and to avoid + // problems on other Kubernetes providers, the length is restricted here. + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + SecretClassName, + "The name of a SecretClass", + "tls", + // The secret class name is used in an annotation on the tls volume. + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + SecretKey, + "The key for a Secret", + "accessKey", + (min_length = 1), + // see https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L435-L451 + (max_length = RFC_1123_SUBDOMAIN_MAX_LENGTH), + (regex = "^[-._a-zA-Z0-9]+$") +} + +attributed_string_type! { + SecretName, + "The name of a Secret", + "hive-internal-secret", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + ServiceAccountName, + "The name of a ServiceAccount", + "hive-serviceaccount", + is_rfc_1123_dns_subdomain_name +} + +attributed_string_type! { + ServiceName, + "The name of a Service", + "hive-metastore-default", + is_rfc_1035_label_name, + is_valid_label_value +} + +attributed_string_type! 
{ + StatefulSetName, + "The name of a StatefulSet", + "hive-metastore-default", + (max_length = + // see https://github.com/kubernetes/kubernetes/issues/64023 + RFC_1123_LABEL_MAX_LENGTH + - 1 /* dash */ + - 10 /* digits for the controller-revision-hash label */), + is_rfc_1123_label_name, + is_valid_label_value +} + +attributed_string_type! { + Uid, + "A UID", + "c27b3971-ca72-42c1-80a4-abdfc1db0ddd", + is_uid, + is_valid_label_value +} + +attributed_string_type! { + VolumeName, + "The name of a Volume", + "config", + is_rfc_1123_label_name, + is_valid_label_value +} + +#[cfg(test)] +mod tests { + use super::{ + ClusterRoleName, ConfigMapKey, ConfigMapName, ContainerName, Hostname, ListenerClassName, + ListenerName, NamespaceName, PersistentVolumeClaimName, RoleBindingName, SecretClassName, + SecretKey, SecretName, ServiceAccountName, ServiceName, StatefulSetName, Uid, VolumeName, + }; + + #[test] + fn test_attributed_string_type_examples() { + ConfigMapName::test_example(); + ConfigMapKey::test_example(); + ContainerName::test_example(); + ClusterRoleName::test_example(); + Hostname::test_example(); + ListenerName::test_example(); + ListenerClassName::test_example(); + NamespaceName::test_example(); + PersistentVolumeClaimName::test_example(); + RoleBindingName::test_example(); + SecretClassName::test_example(); + SecretKey::test_example(); + SecretName::test_example(); + ServiceAccountName::test_example(); + ServiceName::test_example(); + StatefulSetName::test_example(); + Uid::test_example(); + VolumeName::test_example(); + } +} diff --git a/rust/operator-binary/src/framework/types/operator.rs b/rust/operator-binary/src/framework/types/operator.rs new file mode 100644 index 00000000..ae79ab28 --- /dev/null +++ b/rust/operator-binary/src/framework/types/operator.rs @@ -0,0 +1,91 @@ +//! Names for operators + +use std::str::FromStr; + +use crate::attributed_string_type; + +attributed_string_type! 
{ + ProductName, + "The name of a product", + "hive", + // A suffix is added to produce a label value. An according compile-time check ensures that + // max_length cannot be set higher. + (max_length = 54), + is_rfc_1123_dns_subdomain_name, + is_valid_label_value +} + +attributed_string_type! { + ProductVersion, + "The version of a product", + "4.0.1", + is_valid_label_value +} + +attributed_string_type! { + ClusterName, + "The name of a cluster/stacklet", + "my-hive-cluster", + // Suffixes are added to produce resource names. According compile-time checks ensure that + // max_length cannot be set higher. + (max_length = 24), + is_rfc_1035_label_name, + is_valid_label_value +} + +attributed_string_type! { + ControllerName, + "The name of a controller in an operator", + "hivecluster", + is_valid_label_value +} + +attributed_string_type! { + OperatorName, + "The name of an operator", + "hive.stackable.tech", + is_valid_label_value +} + +attributed_string_type! { + RoleGroupName, + "The name of a role group", + "default", + // The role-group name is used to produce resource names. To make sure that all resource names + // are valid, max_length is restricted. Compile-time checks ensure that max_length cannot be + // set higher if not other names like the RoleName are set lower accordingly. + (max_length = 16), + is_rfc_1123_label_name, + is_valid_label_value +} + +attributed_string_type! { + RoleName, + "The name of a role", + "metastore", + // The role name is used to produce resource names. To make sure that all resource names are + // valid, max_length is restricted. Compile-time checks ensure that max_length cannot be set + // higher if not other names like the RoleGroupName are set lower accordingly.
+ (max_length = 10), + is_rfc_1123_label_name, + is_valid_label_value +} + +#[cfg(test)] +mod tests { + use super::{ + ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, + RoleName, + }; + + #[test] + fn test_attributed_string_type_examples() { + ProductName::test_example(); + ProductVersion::test_example(); + ClusterName::test_example(); + ControllerName::test_example(); + OperatorName::test_example(); + RoleGroupName::test_example(); + RoleName::test_example(); + } +} diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index 80f00860..6e950154 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -42,6 +42,7 @@ mod config; mod controller; mod crd; mod discovery; +mod framework; mod kerberos; mod listener; mod operations; From b7e9c5601fc220e1d826158b080da04e131ba5fe Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Fri, 8 May 2026 18:23:22 +0200 Subject: [PATCH 2/3] feat: add framework builder, kvp, and utility modules Port remaining framework modules from the opensearch-operator reference: builder (meta, pdb, pod, statefulset), kvp labels, product logging, cluster resources, controller utils, role/role-group utilities. These provide infallible wrappers around stackable-operator builder APIs and type-safe resource name derivation with compile-time length assertions. All code is currently unused and will be wired into the controller in the next commit. 
Co-Authored-By: Claude Opus 4.6 --- rust/operator-binary/src/framework.rs | 16 + rust/operator-binary/src/framework/builder.rs | 8 + .../src/framework/builder/meta.rs | 108 +++++ .../src/framework/builder/pdb.rs | 161 ++++++++ .../src/framework/builder/pod.rs | 2 + .../src/framework/builder/pod/container.rs | 367 +++++++++++++++++ .../src/framework/builder/pod/volume.rs | 48 +++ .../src/framework/builder/statefulset.rs | 118 ++++++ .../src/framework/cluster_resources.rs | 50 +++ .../src/framework/controller_utils.rs | 211 ++++++++++ rust/operator-binary/src/framework/kvp.rs | 1 + .../src/framework/kvp/label.rs | 195 ++++++++++ .../src/framework/product_logging.rs | 1 + .../framework/product_logging/framework.rs | 127 ++++++ .../src/framework/role_group_utils.rs | 151 +++++++ .../src/framework/role_utils.rs | 368 ++++++++++++++++++ 16 files changed, 1932 insertions(+) create mode 100644 rust/operator-binary/src/framework/builder.rs create mode 100644 rust/operator-binary/src/framework/builder/meta.rs create mode 100644 rust/operator-binary/src/framework/builder/pdb.rs create mode 100644 rust/operator-binary/src/framework/builder/pod.rs create mode 100644 rust/operator-binary/src/framework/builder/pod/container.rs create mode 100644 rust/operator-binary/src/framework/builder/pod/volume.rs create mode 100644 rust/operator-binary/src/framework/builder/statefulset.rs create mode 100644 rust/operator-binary/src/framework/cluster_resources.rs create mode 100644 rust/operator-binary/src/framework/controller_utils.rs create mode 100644 rust/operator-binary/src/framework/kvp.rs create mode 100644 rust/operator-binary/src/framework/kvp/label.rs create mode 100644 rust/operator-binary/src/framework/product_logging.rs create mode 100644 rust/operator-binary/src/framework/product_logging/framework.rs create mode 100644 rust/operator-binary/src/framework/role_group_utils.rs create mode 100644 rust/operator-binary/src/framework/role_utils.rs diff --git 
a/rust/operator-binary/src/framework.rs b/rust/operator-binary/src/framework.rs index 8388e156..8bc3c995 100644 --- a/rust/operator-binary/src/framework.rs +++ b/rust/operator-binary/src/framework.rs @@ -21,7 +21,21 @@ use types::kubernetes::Uid; +#[allow(dead_code)] +pub mod builder; +#[allow(dead_code)] +pub mod cluster_resources; +#[allow(dead_code)] +pub mod controller_utils; +#[allow(dead_code)] +pub mod kvp; pub mod macros; +#[allow(dead_code)] +pub mod product_logging; +#[allow(dead_code)] +pub mod role_group_utils; +#[allow(dead_code)] +pub mod role_utils; pub mod types; /// Has a non-empty name @@ -34,11 +48,13 @@ pub trait HasName { } /// Has a Kubernetes UID +#[allow(dead_code)] pub trait HasUid { fn to_uid(&self) -> Uid; } /// The name is a valid label value +#[allow(dead_code)] pub trait NameIsValidLabelValue { fn to_label_value(&self) -> String; } diff --git a/rust/operator-binary/src/framework/builder.rs b/rust/operator-binary/src/framework/builder.rs new file mode 100644 index 00000000..a6530b5d --- /dev/null +++ b/rust/operator-binary/src/framework/builder.rs @@ -0,0 +1,8 @@ +#[allow(dead_code)] +pub mod meta; +#[allow(dead_code)] +pub mod pdb; +#[allow(dead_code)] +pub mod pod; +#[allow(dead_code)] +pub mod statefulset; diff --git a/rust/operator-binary/src/framework/builder/meta.rs b/rust/operator-binary/src/framework/builder/meta.rs new file mode 100644 index 00000000..127b351b --- /dev/null +++ b/rust/operator-binary/src/framework/builder/meta.rs @@ -0,0 +1,108 @@ +use stackable_operator::{ + builder::meta::OwnerReferenceBuilder, + k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference, kube::Resource, +}; + +use crate::framework::{HasName, HasUid}; + +/// Infallible variant of +/// [`stackable_operator::builder::meta::ObjectMetaBuilder::ownerreference_from_resource`] +pub fn ownerreference_from_resource( + resource: &(impl Resource + HasName + HasUid), + block_owner_deletion: Option, + controller: Option, +) -> OwnerReference { + 
OwnerReferenceBuilder::new() + // Set api_version, kind, name and additionally the UID if it exists. + .initialize_from_resource(resource) + // Ensure that the name is set. + .name(resource.to_name()) + // Ensure that the UID is set. + .uid(resource.to_uid().to_string()) + .block_owner_deletion_opt(block_owner_deletion) + .controller_opt(controller) + .build() + .expect( + "OwnerReference should be created because the resource has an api_version, kind, name \ + and uid.", + ) +} + +#[cfg(test)] +mod tests { + use std::borrow::Cow; + + use stackable_operator::{ + k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta, kube::Resource, + }; + + use crate::framework::{ + HasName, HasUid, builder::meta::ownerreference_from_resource, types::kubernetes::Uid, + }; + + struct TestCluster { + object_meta: ObjectMeta, + } + + impl TestCluster { + fn new() -> Self { + TestCluster { + object_meta: ObjectMeta { + name: Some("test-cluster".to_owned()), + uid: Some("a6b89911-d48e-4328-88d6-b9251226583d".to_owned()), + ..ObjectMeta::default() + }, + } + } + } + + impl Resource for TestCluster { + type DynamicType = (); + type Scope = (); + + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("HiveCluster") + } + + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("hive.stackable.tech") + } + + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("v1alpha1") + } + + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("hiveclusters") + } + + fn meta(&self) -> &ObjectMeta { + &self.object_meta + } + + fn meta_mut(&mut self) -> &mut ObjectMeta { + &mut self.object_meta + } + } + + impl HasName for TestCluster { + fn to_name(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + impl HasUid for TestCluster { + fn to_uid(&self) -> Uid { + Uid::from_str_unsafe(&self.object_meta.uid.clone().expect("set in new()")) + } + } + + #[test] + fn test_ownerreference_from_resource() { + let owner_ref = 
ownerreference_from_resource(&TestCluster::new(), Some(true), Some(true)); + assert_eq!(owner_ref.name, "test-cluster"); + assert_eq!(owner_ref.uid, "a6b89911-d48e-4328-88d6-b9251226583d"); + assert_eq!(owner_ref.controller, Some(true)); + assert_eq!(owner_ref.block_owner_deletion, Some(true)); + } +} diff --git a/rust/operator-binary/src/framework/builder/pdb.rs b/rust/operator-binary/src/framework/builder/pdb.rs new file mode 100644 index 00000000..46e29310 --- /dev/null +++ b/rust/operator-binary/src/framework/builder/pdb.rs @@ -0,0 +1,161 @@ +use stackable_operator::{ + builder::pdb::PodDisruptionBudgetBuilder, + k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector, + kube::{Resource, api::ObjectMeta}, +}; + +use crate::framework::{ + HasName, HasUid, NameIsValidLabelValue, + types::operator::{ControllerName, OperatorName, ProductName, RoleName}, +}; + +/// Infallible variant of +/// [`stackable_operator::builder::pdb::PodDisruptionBudgetBuilder::new_with_role`] +pub fn pod_disruption_budget_builder_with_role( + owner: &(impl Resource + HasName + NameIsValidLabelValue + HasUid), + product_name: &ProductName, + role_name: &RoleName, + operator_name: &OperatorName, + controller_name: &ControllerName, +) -> PodDisruptionBudgetBuilder { + PodDisruptionBudgetBuilder::new_with_role( + owner, + &product_name.to_label_value(), + &role_name.to_label_value(), + &operator_name.to_label_value(), + &controller_name.to_label_value(), + ) + .expect( + "PodDisruptionBudgetBuilder should be created because the owner has an object name and UID \ + and all given parameters produce valid label values.", + ) +} + +#[cfg(test)] +mod tests { + use std::borrow::Cow; + + use stackable_operator::{ + k8s_openapi::{ + api::policy::v1::{PodDisruptionBudget, PodDisruptionBudgetSpec}, + apimachinery::pkg::{ + apis::meta::v1::{LabelSelector, ObjectMeta, OwnerReference}, + util::intstr::IntOrString, + }, + }, + kube::Resource, + }; + + use crate::framework::{ + HasName, HasUid, 
NameIsValidLabelValue, + builder::pdb::pod_disruption_budget_builder_with_role, + types::{ + kubernetes::Uid, + operator::{ControllerName, OperatorName, ProductName, RoleName}, + }, + }; + + struct Cluster { + object_meta: ObjectMeta, + } + + impl Cluster { + fn new() -> Self { + Cluster { + object_meta: ObjectMeta { + name: Some("cluster-name".to_owned()), + uid: Some("a6b89911-d48e-4328-88d6-b9251226583d".to_owned()), + ..ObjectMeta::default() + }, + } + } + } + + impl Resource for Cluster { + type DynamicType = (); + type Scope = (); + + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("HiveCluster") } + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("hive.stackable.tech") } + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("v1alpha1") } + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("hiveclusters") } + + fn meta(&self) -> &ObjectMeta { &self.object_meta } + fn meta_mut(&mut self) -> &mut ObjectMeta { &mut self.object_meta } + } + + impl HasName for Cluster { + fn to_name(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + impl HasUid for Cluster { + fn to_uid(&self) -> Uid { + Uid::from_str_unsafe(&self.object_meta.uid.clone().expect("set in new()")) + } + } + + impl NameIsValidLabelValue for Cluster { + fn to_label_value(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + #[test] + fn test_pod_disruption_budget_builder_with_role() { + let actual_pdb = pod_disruption_budget_builder_with_role( + &Cluster::new(), + &ProductName::from_str_unsafe("my-product"), + &RoleName::from_str_unsafe("my-role"), + &OperatorName::from_str_unsafe("my-operator"), + &ControllerName::from_str_unsafe("my-controller"), + ) + .with_max_unavailable(2) + .build(); + + let expected_pdb = PodDisruptionBudget { + metadata: ObjectMeta { + labels: Some( + [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + 
("app.kubernetes.io/managed-by", "my-operator_my-controller"), + ("app.kubernetes.io/name", "my-product"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(), + ), + name: Some("cluster-name-my-role".to_owned()), + owner_references: Some(vec![OwnerReference { + api_version: "hive.stackable.tech/v1alpha1".to_owned(), + controller: Some(true), + kind: "HiveCluster".to_owned(), + name: "cluster-name".to_owned(), + uid: "a6b89911-d48e-4328-88d6-b9251226583d".to_owned(), + ..OwnerReference::default() + }]), + ..ObjectMeta::default() + }, + spec: Some(PodDisruptionBudgetSpec { + max_unavailable: Some(IntOrString::Int(2)), + selector: Some(LabelSelector { + match_labels: Some( + [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/name", "my-product"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(), + ), + ..LabelSelector::default() + }), + ..PodDisruptionBudgetSpec::default() + }), + ..PodDisruptionBudget::default() + }; + + assert_eq!(expected_pdb, actual_pdb); + } +} diff --git a/rust/operator-binary/src/framework/builder/pod.rs b/rust/operator-binary/src/framework/builder/pod.rs new file mode 100644 index 00000000..df93bd44 --- /dev/null +++ b/rust/operator-binary/src/framework/builder/pod.rs @@ -0,0 +1,2 @@ +pub mod container; +pub mod volume; diff --git a/rust/operator-binary/src/framework/builder/pod/container.rs b/rust/operator-binary/src/framework/builder/pod/container.rs new file mode 100644 index 00000000..244bf003 --- /dev/null +++ b/rust/operator-binary/src/framework/builder/pod/container.rs @@ -0,0 +1,367 @@ +use std::{collections::BTreeMap, fmt::Display, str::FromStr}; + +use snafu::Snafu; +use stackable_operator::{ + builder::pod::container::{ContainerBuilder, FieldPathEnvVar}, + k8s_openapi::api::core::v1::{ConfigMapKeySelector, EnvVar, EnvVarSource, ObjectFieldSelector}, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use 
crate::framework::types::kubernetes::{ConfigMapKey, ConfigMapName, ContainerName}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display( + "invalid environment variable name: a valid environment variable name must not be empty \ + and must consist only of printable ASCII characters other than '='" + ))] + ParseEnvVarName { env_var_name: String }, +} + +/// Infallible variant of [`stackable_operator::builder::pod::container::ContainerBuilder::new`] +pub fn new_container_builder(container_name: &ContainerName) -> ContainerBuilder { + ContainerBuilder::new(container_name.as_ref()).expect("should be a valid container name") +} + +// TODO Use attributed_string_type instead +/// Validated environment variable name +#[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct EnvVarName(String); + +impl EnvVarName { + /// Creates an [`EnvVarName`] from the given string and panics if the validation failed + /// + /// Use this only with constant names that are also tested in unit tests! + pub fn from_str_unsafe(s: &str) -> Self { + EnvVarName::from_str(s).expect("should be a valid environment variable name") + } +} + +impl Display for EnvVarName { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl FromStr for EnvVarName { + type Err = Error; + + fn from_str(s: &str) -> Result { + // The length of environment variable names seems not to be restricted. + + if !s.is_empty() && s.chars().all(|c| matches!(c, ' '..='<' | '>'..='~')) { + Ok(Self(s.to_owned())) + } else { + Err(Error::ParseEnvVarName { + env_var_name: s.to_owned(), + }) + } + } +} + +/// A set of [`EnvVar`]s +/// +/// The environment variable names in the set are unique. 
#[derive(Clone, Debug, Default, PartialEq)]
pub struct EnvVarSet(BTreeMap<EnvVarName, EnvVar>);

impl EnvVarSet {
    /// Creates an empty [`EnvVarSet`]
    pub fn new() -> Self {
        Self::default()
    }

    /// Returns a reference to the [`EnvVar`] with the given name
    pub fn get(&self, env_var_name: &EnvVarName) -> Option<&EnvVar> {
        self.0.get(env_var_name)
    }

    /// Moves all [`EnvVar`]s from the given set into this one.
    ///
    /// [`EnvVar`]s with the same name are overridden.
    pub fn merge(mut self, mut env_var_set: EnvVarSet) -> Self {
        // BTreeMap::append drains the argument; on key collisions the entries
        // of `env_var_set` win, which implements the documented override.
        self.0.append(&mut env_var_set.0);

        self
    }

    /// Adds the given [`EnvVar`]s to this set
    ///
    /// [`EnvVar`]s with the same name are overridden.
    // NOTE(review): the generic parameter list was stripped in the patch text;
    // restored from the call sites (pairs of name and Into<String> value).
    pub fn with_values<I, V>(self, env_vars: I) -> Self
    where
        I: IntoIterator<Item = (EnvVarName, V)>,
        V: Into<String>,
    {
        env_vars
            .into_iter()
            .fold(self, |extended_env_vars, (name, value)| {
                extended_env_vars.with_value(&name, value)
            })
    }

    /// Adds an environment variable with the given name and string value to this set
    ///
    /// An [`EnvVar`] with the same name is overridden.
    pub fn with_value(mut self, name: &EnvVarName, value: impl Into<String>) -> Self {
        self.0.insert(
            name.clone(),
            EnvVar {
                name: name.to_string(),
                value: Some(value.into()),
                value_from: None,
            },
        );

        self
    }

    /// Adds an environment variable with the given name and field path to this set
    ///
    /// An [`EnvVar`] with the same name is overridden.
    pub fn with_field_path(mut self, name: &EnvVarName, field_path: FieldPathEnvVar) -> Self {
        self.0.insert(
            name.clone(),
            EnvVar {
                name: name.to_string(),
                value: None,
                value_from: Some(EnvVarSource {
                    field_ref: Some(ObjectFieldSelector {
                        field_path: field_path.to_string(),
                        ..ObjectFieldSelector::default()
                    }),
                    ..EnvVarSource::default()
                }),
            },
        );

        self
    }

    /// Adds an environment variable with the given ConfigMap key reference to this set
    ///
    /// An [`EnvVar`] with the same name is overridden.
+ pub fn with_config_map_key_ref( + mut self, + name: &EnvVarName, + config_map_name: &ConfigMapName, + config_map_key: &ConfigMapKey, + ) -> Self { + self.0.insert( + name.clone(), + EnvVar { + name: name.to_string(), + value: None, + value_from: Some(EnvVarSource { + config_map_key_ref: Some(ConfigMapKeySelector { + key: config_map_key.to_string(), + name: config_map_name.to_string(), + ..ConfigMapKeySelector::default() + }), + ..EnvVarSource::default() + }), + }, + ); + + self + } +} + +impl From for Vec { + fn from(value: EnvVarSet) -> Self { + value.0.values().cloned().collect() + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use stackable_operator::{ + builder::pod::container::FieldPathEnvVar, + k8s_openapi::api::core::v1::{ + ConfigMapKeySelector, EnvVar, EnvVarSource, ObjectFieldSelector, + }, + }; + + use super::{EnvVarName, EnvVarSet}; + use crate::framework::{ + builder::pod::container::new_container_builder, + types::kubernetes::{ConfigMapKey, ConfigMapName, ContainerName}, + }; + + #[test] + fn test_envvarname_fromstr() { + // actually accepted by Kubernetes + assert!(EnvVarName::from_str(" !\"#$%&'()*+,-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~").is_ok()); + + // empty string + assert!(EnvVarName::from_str("").is_err()); + // non-printable ASCII characters + assert!(EnvVarName::from_str("\n").is_err()); + assert!(EnvVarName::from_str("€").is_err()); + // equals sign + assert!(EnvVarName::from_str("=").is_err()); + } + + #[test] + fn test_new_container_builder() { + // Test that the function does not panic + new_container_builder(&ContainerName::from_str_unsafe("valid-container-name")); + } + + #[test] + fn test_envvarname_format() { + assert_eq!( + "TEST".to_owned(), + format!("{}", EnvVarName::from_str_unsafe("TEST")) + ); + } + + #[test] + fn test_envvarset_merge() { + let env_var_set1 = EnvVarSet::new().with_values([ + ( + EnvVarName::from_str_unsafe("ENV1"), + "value1 from env_var_set1", + 
), + ( + EnvVarName::from_str_unsafe("ENV2"), + "value2 from env_var_set1", + ), + ( + EnvVarName::from_str_unsafe("ENV3"), + "value3 from env_var_set1", + ), + ]); + let env_var_set2 = EnvVarSet::new() + .with_value( + &EnvVarName::from_str_unsafe("ENV2"), + "value2 from env_var_set2", + ) + .with_field_path(&EnvVarName::from_str_unsafe("ENV3"), FieldPathEnvVar::Name) + .with_value( + &EnvVarName::from_str_unsafe("ENV4"), + "value4 from env_var_set2", + ); + + let merged_env_var_set = env_var_set1.merge(env_var_set2); + + assert_eq!( + vec![ + EnvVar { + name: "ENV1".to_owned(), + value: Some("value1 from env_var_set1".to_owned()), + value_from: None + }, + EnvVar { + name: "ENV2".to_owned(), + value: Some("value2 from env_var_set2".to_owned()), + value_from: None + }, + EnvVar { + name: "ENV3".to_owned(), + value: None, + value_from: Some(EnvVarSource { + field_ref: Some(ObjectFieldSelector { + field_path: "metadata.name".to_owned(), + ..ObjectFieldSelector::default() + }), + ..EnvVarSource::default() + }), + }, + EnvVar { + name: "ENV4".to_owned(), + value: Some("value4 from env_var_set2".to_owned()), + value_from: None + } + ], + Vec::from(merged_env_var_set) + ); + } + + #[test] + fn test_envvarset_with_values() { + let env_var_set = EnvVarSet::new().with_values([ + (EnvVarName::from_str_unsafe("ENV1"), "value1"), + (EnvVarName::from_str_unsafe("ENV2"), "value2"), + ]); + + assert_eq!( + vec![ + EnvVar { + name: "ENV1".to_owned(), + value: Some("value1".to_owned()), + value_from: None + }, + EnvVar { + name: "ENV2".to_owned(), + value: Some("value2".to_owned()), + value_from: None + } + ], + Vec::from(env_var_set) + ); + } + + #[test] + fn test_envvarset_with_value() { + let env_var_set = EnvVarSet::new().with_value(&EnvVarName::from_str_unsafe("ENV"), "value"); + + assert_eq!( + Some(&EnvVar { + name: "ENV".to_owned(), + value: Some("value".to_owned()), + value_from: None + }), + env_var_set.get(&EnvVarName::from_str_unsafe("ENV")) + ); + } + + #[test] + fn 
test_envvarset_with_field_path() { + let env_var_set = EnvVarSet::new() + .with_field_path(&EnvVarName::from_str_unsafe("ENV"), FieldPathEnvVar::Name); + + assert_eq!( + Some(&EnvVar { + name: "ENV".to_owned(), + value: None, + value_from: Some(EnvVarSource { + field_ref: Some(ObjectFieldSelector { + field_path: "metadata.name".to_owned(), + ..ObjectFieldSelector::default() + }), + ..EnvVarSource::default() + }), + }), + env_var_set.get(&EnvVarName::from_str_unsafe("ENV")) + ); + } + + #[test] + fn test_envvarset_with_config_map_key_ref() { + let env_var_set = EnvVarSet::new().with_config_map_key_ref( + &EnvVarName::from_str_unsafe("ENV"), + &ConfigMapName::from_str_unsafe("config-map"), + &ConfigMapKey::from_str_unsafe("key"), + ); + + assert_eq!( + Some(&EnvVar { + name: "ENV".to_owned(), + value: None, + value_from: Some(EnvVarSource { + config_map_key_ref: Some(ConfigMapKeySelector { + key: "key".to_owned(), + name: "config-map".to_owned(), + ..ConfigMapKeySelector::default() + }), + ..EnvVarSource::default() + }), + }), + env_var_set.get(&EnvVarName::from_str_unsafe("ENV")) + ); + } +} diff --git a/rust/operator-binary/src/framework/builder/pod/volume.rs b/rust/operator-binary/src/framework/builder/pod/volume.rs new file mode 100644 index 00000000..06dc4846 --- /dev/null +++ b/rust/operator-binary/src/framework/builder/pod/volume.rs @@ -0,0 +1,48 @@ +use stackable_operator::{ + builder::pod::volume::ListenerOperatorVolumeSourceBuilder, + k8s_openapi::api::core::v1::PersistentVolumeClaim, kvp::Labels, +}; + +use crate::framework::types::kubernetes::{ + ListenerClassName, ListenerName, PersistentVolumeClaimName, +}; + +/// Infallible variant of [`stackable_operator::builder::pod::volume::ListenerReference`] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ListenerReference { + ListenerClass(ListenerClassName), + Listener(ListenerName), +} + +impl From<&ListenerReference> for stackable_operator::builder::pod::volume::ListenerReference { + fn from(value: 
&ListenerReference) -> Self { + match value { + ListenerReference::ListenerClass(listener_class_name) => { + stackable_operator::builder::pod::volume::ListenerReference::ListenerClass( + listener_class_name.to_string(), + ) + } + ListenerReference::Listener(listener_name) => { + stackable_operator::builder::pod::volume::ListenerReference::ListenerName( + listener_name.to_string(), + ) + } + } + } +} + +/// Infallible variant of +/// [`stackable_operator::builder::pod::volume::ListenerOperatorVolumeSourceBuilder::build_pvc`] +pub fn listener_operator_volume_source_builder_build_pvc( + listener_reference: &ListenerReference, + labels: &Labels, + pvc_name: &PersistentVolumeClaimName, +) -> PersistentVolumeClaim { + ListenerOperatorVolumeSourceBuilder::new(&listener_reference.into(), labels) + .build_pvc(pvc_name.to_string()) + .expect( + "should return a PersistentVolumeClaim, because the only check is that \ + listener_reference is a valid annotation value and there are no restrictions on single \ + annotation values", + ) +} diff --git a/rust/operator-binary/src/framework/builder/statefulset.rs b/rust/operator-binary/src/framework/builder/statefulset.rs new file mode 100644 index 00000000..904d333b --- /dev/null +++ b/rust/operator-binary/src/framework/builder/statefulset.rs @@ -0,0 +1,118 @@ +use std::collections::BTreeMap; + +use stackable_operator::kvp::Annotations; + +use crate::framework::types::kubernetes::{ConfigMapName, SecretName}; + +/// Creates `restarter.stackable.tech/ignore-configmap.{i}` annotations for each given ConfigMap. +/// +/// The restarter uses these annotations to skip restarting Pods when specific ConfigMaps change. +/// Indices start at 0 and are assigned in iteration order, so **do not merge the result with +/// annotations from another call** — duplicate indices would overwrite each other. 
+pub fn restarter_ignore_configmap_annotations( + ignored_config_maps: impl IntoIterator, +) -> Annotations { + let annotation_key_values = ignored_config_maps + .into_iter() + .enumerate() + .map(|(i, config_map_name)| { + ( + format!("restarter.stackable.tech/ignore-configmap.{i}"), + config_map_name.to_string(), + ) + }) + .collect::>(); + + Annotations::try_from(annotation_key_values).expect( + "should contain only valid annotations because the annotation keys are statically \ + defined apart from the index number and the names of ConfigMaps are valid annotation \ + values.", + ) +} + +/// Creates `restarter.stackable.tech/ignore-secret.{i}` annotations for each given Secret. +/// +/// The restarter uses these annotations to skip restarting Pods when specific Secrets change. +/// Indices start at 0 and are assigned in iteration order, so **do not merge the result with +/// annotations from another call** — duplicate indices would overwrite each other. +pub fn restarter_ignore_secret_annotations( + ignored_secrets: impl IntoIterator, +) -> Annotations { + let annotation_key_values = ignored_secrets + .into_iter() + .enumerate() + .map(|(i, secret_name)| { + ( + format!("restarter.stackable.tech/ignore-secret.{i}"), + secret_name.to_string(), + ) + }) + .collect::>(); + + Annotations::try_from(annotation_key_values).expect( + "should contain only valid annotations because the annotation keys are statically \ + defined apart from the index number and the names of Secrets are valid annotation \ + values.", + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn multiple_config_maps_produce_indexed_annotations() { + let ignored_config_maps = [ + ConfigMapName::from_str_unsafe("first-config"), + ConfigMapName::from_str_unsafe("second-config"), + ConfigMapName::from_str_unsafe("third-config"), + ]; + + let actual_annotations = restarter_ignore_configmap_annotations(ignored_config_maps); + + let expected_annotations = BTreeMap::from([ + ( + 
"restarter.stackable.tech/ignore-configmap.0".to_owned(), + "first-config".to_owned(), + ), + ( + "restarter.stackable.tech/ignore-configmap.1".to_owned(), + "second-config".to_owned(), + ), + ( + "restarter.stackable.tech/ignore-configmap.2".to_owned(), + "third-config".to_owned(), + ), + ]); + + assert_eq!(expected_annotations, actual_annotations.into()); + } + + #[test] + fn multiple_secrets_produce_indexed_annotations() { + let ignored_secrets = [ + SecretName::from_str_unsafe("first-secret"), + SecretName::from_str_unsafe("second-secret"), + SecretName::from_str_unsafe("third-secret"), + ]; + + let actual_annotations = restarter_ignore_secret_annotations(ignored_secrets); + + let expected_annotations = BTreeMap::from([ + ( + "restarter.stackable.tech/ignore-secret.0".to_owned(), + "first-secret".to_owned(), + ), + ( + "restarter.stackable.tech/ignore-secret.1".to_owned(), + "second-secret".to_owned(), + ), + ( + "restarter.stackable.tech/ignore-secret.2".to_owned(), + "third-secret".to_owned(), + ), + ]); + + assert_eq!(expected_annotations, actual_annotations.into()); + } +} diff --git a/rust/operator-binary/src/framework/cluster_resources.rs b/rust/operator-binary/src/framework/cluster_resources.rs new file mode 100644 index 00000000..430b534f --- /dev/null +++ b/rust/operator-binary/src/framework/cluster_resources.rs @@ -0,0 +1,50 @@ +use stackable_operator::{ + cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, + deep_merger::ObjectOverrides, + k8s_openapi::api::core::v1::ObjectReference, +}; + +use super::types::{ + kubernetes::{NamespaceName, Uid}, + operator::{ClusterName, ControllerName, OperatorName, ProductName}, +}; +use crate::framework::{ + NameIsValidLabelValue, macros::attributed_string_type::MAX_LABEL_VALUE_LENGTH, +}; + +/// Infallible variant of [`stackable_operator::cluster_resources::ClusterResources::new`] +#[allow(clippy::too_many_arguments)] +pub fn cluster_resources_new<'a>( + product_name: &ProductName, + 
operator_name: &OperatorName, + controller_name: &ControllerName, + cluster_name: &ClusterName, + cluster_namespace: &NamespaceName, + cluster_uid: &Uid, + apply_strategy: ClusterResourceApplyStrategy, + object_overrides: &'a ObjectOverrides, +) -> ClusterResources<'a> { + // compile-time check + // ClusterResources::new creates a label value from the given app name by appending + // `-operator`. For the resulting label value to be valid, it must not exceed + // MAX_LABEL_VALUE_LENGTH. + const _: () = assert!( + ProductName::MAX_LENGTH + "-operator".len() <= MAX_LABEL_VALUE_LENGTH, + "The string `-operator` must not exceed the limit of Label names." + ); + + ClusterResources::new( + &product_name.to_label_value(), + &operator_name.to_label_value(), + &controller_name.to_label_value(), + &ObjectReference { + name: Some(cluster_name.to_string()), + namespace: Some(cluster_namespace.to_string()), + uid: Some(cluster_uid.to_string()), + ..Default::default() + }, + apply_strategy, + object_overrides, + ) + .expect("ClusterResources should be created because the cluster object reference contains name, namespace and uid.") +} diff --git a/rust/operator-binary/src/framework/controller_utils.rs b/rust/operator-binary/src/framework/controller_utils.rs new file mode 100644 index 00000000..d15e53f5 --- /dev/null +++ b/rust/operator-binary/src/framework/controller_utils.rs @@ -0,0 +1,211 @@ +//! 
Helper functions which are not tied to a specific controller step + +use std::str::FromStr; + +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::kube::runtime::reflector::Lookup; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::framework::types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, +}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to get the cluster name"))] + GetClusterName {}, + + #[snafu(display("failed to get the namespace"))] + GetNamespace {}, + + #[snafu(display("failed to get the UID"))] + GetUid {}, + + #[snafu(display("failed to set the cluster name"))] + ParseClusterName { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to set the namespace"))] + ParseNamespace { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to set the UID"))] + ParseUid { + source: crate::framework::macros::attributed_string_type::Error, + }, +} + +type Result = std::result::Result; + +/// Get the cluster name from the given resource +pub fn get_cluster_name(cluster: &impl Lookup) -> Result { + let raw_cluster_name = cluster.name().context(GetClusterNameSnafu)?; + let cluster_name = ClusterName::from_str(&raw_cluster_name).context(ParseClusterNameSnafu)?; + + Ok(cluster_name) +} + +/// Get the namespace from the given resource +pub fn get_namespace(resource: &impl Lookup) -> Result { + let raw_namespace = resource.namespace().context(GetNamespaceSnafu)?; + let namespace = NamespaceName::from_str(&raw_namespace).context(ParseNamespaceSnafu)?; + + Ok(namespace) +} + +/// Get the UID from the given resource +pub fn get_uid(resource: &impl Lookup) -> Result { + let raw_uid = resource.uid().context(GetUidSnafu)?; + let uid = Uid::from_str(&raw_uid).context(ParseUidSnafu)?; + + Ok(uid) +} + +#[cfg(test)] +mod tests { + use 
stackable_operator::kube::runtime::reflector::Lookup; + use uuid::uuid; + + use super::{ErrorDiscriminants, get_cluster_name, get_namespace, get_uid}; + use crate::framework::types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, + }; + + #[derive(Debug, Default)] + struct TestResource { + name: Option<&'static str>, + namespace: Option<&'static str>, + uid: Option<&'static str>, + } + + impl Lookup for TestResource { + type DynamicType = (); + + fn kind(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "TestResource".into() + } + + fn group(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "stackable.tech".into() + } + + fn version(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "v1".into() + } + + fn plural(_dyntype: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + "testresources".into() + } + + fn name(&self) -> Option> { + self.name.map(std::borrow::Cow::Borrowed) + } + + fn namespace(&self) -> Option> { + self.namespace.map(std::borrow::Cow::Borrowed) + } + + fn resource_version(&self) -> Option> { + Some("1".into()) + } + + fn uid(&self) -> Option> { + self.uid.map(std::borrow::Cow::Borrowed) + } + } + + #[test] + fn test_get_cluster_name() { + assert_eq!( + ClusterName::from_str_unsafe("test-cluster"), + get_cluster_name(&TestResource { + name: Some("test-cluster"), + ..TestResource::default() + }) + .expect("should contain a valid cluster name") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetClusterName), + get_cluster_name(&TestResource { + name: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + assert_eq!( + Err(ErrorDiscriminants::ParseClusterName), + get_cluster_name(&TestResource { + name: Some("invalid cluster name"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } + + #[test] + fn test_get_namespace() { + assert_eq!( + NamespaceName::from_str_unsafe("test-namespace"), + get_namespace(&TestResource { + namespace: 
Some("test-namespace"), + ..TestResource::default() + }) + .expect("should contain a valid namespace") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetNamespace), + get_namespace(&TestResource { + namespace: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + assert_eq!( + Err(ErrorDiscriminants::ParseNamespace), + get_namespace(&TestResource { + namespace: Some("invalid namespace"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } + + #[test] + fn test_get_uid() { + assert_eq!( + Uid::from(uuid!("e6ac237d-a6d4-43a1-8135-f36506110912")), + get_uid(&TestResource { + uid: Some("e6ac237d-a6d4-43a1-8135-f36506110912"), + ..TestResource::default() + }) + .expect("should contain a valid UID") + ); + + assert_eq!( + Err(ErrorDiscriminants::GetUid), + get_uid(&TestResource { + uid: None, + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + + assert_eq!( + Err(ErrorDiscriminants::ParseUid), + get_uid(&TestResource { + uid: Some("invalid UID"), + ..TestResource::default() + }) + .map_err(ErrorDiscriminants::from) + ); + } +} diff --git a/rust/operator-binary/src/framework/kvp.rs b/rust/operator-binary/src/framework/kvp.rs new file mode 100644 index 00000000..0006163a --- /dev/null +++ b/rust/operator-binary/src/framework/kvp.rs @@ -0,0 +1 @@ +pub mod label; diff --git a/rust/operator-binary/src/framework/kvp/label.rs b/rust/operator-binary/src/framework/kvp/label.rs new file mode 100644 index 00000000..72120fde --- /dev/null +++ b/rust/operator-binary/src/framework/kvp/label.rs @@ -0,0 +1,195 @@ +use stackable_operator::{ + kube::Resource, + kvp::{Labels, ObjectLabels}, +}; + +use crate::framework::{ + HasName, NameIsValidLabelValue, + types::operator::{ + ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, RoleName, + }, +}; + +/// Infallible variant of [`stackable_operator::kvp::Labels::recommended`] +pub fn recommended_labels( + owner: &(impl Resource + HasName + 
NameIsValidLabelValue), + product_name: &ProductName, + product_version: &ProductVersion, + operator_name: &OperatorName, + controller_name: &ControllerName, + role_name: &RoleName, + role_group_name: &RoleGroupName, +) -> Labels { + let object_labels = ObjectLabels { + owner, + app_name: &product_name.to_label_value(), + app_version: &product_version.to_label_value(), + operator_name: &operator_name.to_label_value(), + controller_name: &controller_name.to_label_value(), + role: &role_name.to_label_value(), + role_group: &role_group_name.to_label_value(), + }; + Labels::recommended(&object_labels).expect( + "Labels should be created because the owner has an object name and all given parameters \ + produce valid label values.", + ) +} + +/// Infallible variant of [`stackable_operator::kvp::Labels::role_selector`] +pub fn role_selector( + owner: &(impl Resource + HasName + NameIsValidLabelValue), + product_name: &ProductName, + role_name: &RoleName, +) -> Labels { + Labels::role_selector( + owner, + &product_name.to_label_value(), + &role_name.to_label_value(), + ) + .expect("Labels should be created because all given parameters produce valid label values") +} + +/// Infallible variant of [`stackable_operator::kvp::Labels::role_group_selector`] +pub fn role_group_selector( + owner: &(impl Resource + HasName + NameIsValidLabelValue), + product_name: &ProductName, + role_name: &RoleName, + role_group_name: &RoleGroupName, +) -> Labels { + Labels::role_group_selector( + owner, + &product_name.to_label_value(), + &role_name.to_label_value(), + &role_group_name.to_label_value(), + ) + .expect("Labels should be created because all given parameters produce valid label values") +} + +#[cfg(test)] +mod tests { + use std::{borrow::Cow, collections::BTreeMap}; + + use stackable_operator::{ + k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta, kube::Resource, + }; + + use crate::framework::{ + HasName, NameIsValidLabelValue, + kvp::label::{recommended_labels, 
role_group_selector, role_selector}, + types::operator::{ + ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, RoleName, + }, + }; + + struct Cluster { + object_meta: ObjectMeta, + } + + impl Cluster { + fn new() -> Self { + Cluster { + object_meta: ObjectMeta { + name: Some("cluster-name".to_owned()), + ..ObjectMeta::default() + }, + } + } + } + + impl Resource for Cluster { + type DynamicType = (); + type Scope = (); + + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("HiveCluster") } + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("hive.stackable.tech") } + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("v1alpha1") } + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("hiveclusters") } + + fn meta(&self) -> &ObjectMeta { + &self.object_meta + } + + fn meta_mut(&mut self) -> &mut ObjectMeta { + &mut self.object_meta + } + } + + impl HasName for Cluster { + fn to_name(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + impl NameIsValidLabelValue for Cluster { + fn to_label_value(&self) -> String { + self.object_meta.name.clone().expect("set in new()") + } + } + + #[test] + fn test_recommended_labels() { + let actual_labels = recommended_labels( + &Cluster::new(), + &ProductName::from_str_unsafe("my-product"), + &ProductVersion::from_str_unsafe("1.0.0"), + &OperatorName::from_str_unsafe("my-operator"), + &ControllerName::from_str_unsafe("my-controller"), + &RoleName::from_str_unsafe("my-role"), + &RoleGroupName::from_str_unsafe("my-role-group"), + ); + + let expected_labels: BTreeMap = [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/managed-by", "my-operator_my-controller"), + ("app.kubernetes.io/name", "my-product"), + ("app.kubernetes.io/role-group", "my-role-group"), + ("app.kubernetes.io/version", "1.0.0"), + ("stackable.tech/vendor", "Stackable"), + ] + .map(|(k, v)| 
(k.to_owned(), v.to_owned())) + .into(); + + assert_eq!(expected_labels, actual_labels.into()); + } + + #[test] + fn test_role_selector() { + let actual_labels = role_selector( + &Cluster::new(), + &ProductName::from_str_unsafe("my-product"), + &RoleName::from_str_unsafe("my-role"), + ); + + let expected_labels: BTreeMap = [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/name", "my-product"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(); + + assert_eq!(expected_labels, actual_labels.into()); + } + + #[test] + fn test_role_group_selector() { + let actual_labels = role_group_selector( + &Cluster::new(), + &ProductName::from_str_unsafe("my-product"), + &RoleName::from_str_unsafe("my-role"), + &RoleGroupName::from_str_unsafe("my-role-group"), + ); + + let expected_labels: BTreeMap = [ + ("app.kubernetes.io/component", "my-role"), + ("app.kubernetes.io/instance", "cluster-name"), + ("app.kubernetes.io/name", "my-product"), + ("app.kubernetes.io/role-group", "my-role-group"), + ] + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .into(); + + assert_eq!(expected_labels, actual_labels.into()); + } +} diff --git a/rust/operator-binary/src/framework/product_logging.rs b/rust/operator-binary/src/framework/product_logging.rs new file mode 100644 index 00000000..0c717499 --- /dev/null +++ b/rust/operator-binary/src/framework/product_logging.rs @@ -0,0 +1 @@ +pub mod framework; diff --git a/rust/operator-binary/src/framework/product_logging/framework.rs b/rust/operator-binary/src/framework/product_logging/framework.rs new file mode 100644 index 00000000..a4a5c8b8 --- /dev/null +++ b/rust/operator-binary/src/framework/product_logging/framework.rs @@ -0,0 +1,127 @@ +use std::fmt::Display; + +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::product_logging::spec::{ + AutomaticContainerLogConfig, ConfigMapLogConfig, ContainerLogConfig, ContainerLogConfigChoice, + 
CustomContainerLogConfig, Logging, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::framework::types::kubernetes::ConfigMapName; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to get container log configuration for container {container}"))] + GetContainerLogConfiguration { container: String }, + + #[snafu(display("failed to parse ConfigMap name for custom log configuration"))] + ParseConfigMapName { + source: crate::framework::macros::attributed_string_type::Error, + }, +} + +#[derive(Clone, Debug)] +pub enum ValidatedContainerLogConfigChoice { + Automatic(AutomaticContainerLogConfig), + Custom(ConfigMapName), +} + +impl ValidatedContainerLogConfigChoice { + /// Converts back to the raw upstream type for use at API boundaries + /// (e.g. calling `product_logging::framework::vector_container`). + pub fn to_raw_container_log_config(&self) -> ContainerLogConfig { + match self { + Self::Automatic(auto) => ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic(auto.clone())), + }, + Self::Custom(name) => ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Custom(CustomContainerLogConfig { + custom: ConfigMapLogConfig { + config_map: name.to_string(), + }, + })), + }, + } + } +} + +#[derive(Clone, Debug)] +pub struct VectorContainerLogConfig { + pub log_config: ValidatedContainerLogConfigChoice, +} + +pub fn validate_logging_configuration_for_container( + logging: &Logging, + container: T, +) -> Result +where + T: Clone + Display + Ord, +{ + use std::str::FromStr; + + let config = logging + .containers + .get(&container) + .and_then(|c| c.choice.as_ref()) + .context(GetContainerLogConfigurationSnafu { + container: container.to_string(), + })?; + + match config { + ContainerLogConfigChoice::Automatic(automatic) => Ok( + ValidatedContainerLogConfigChoice::Automatic(automatic.clone()), + ), + ContainerLogConfigChoice::Custom(custom) => { + 
let config_map_name = ConfigMapName::from_str(&custom.custom.config_map) + .context(ParseConfigMapNameSnafu)?; + Ok(ValidatedContainerLogConfigChoice::Custom(config_map_name)) + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use stackable_operator::product_logging::spec::{ + AutomaticContainerLogConfig, ContainerLogConfig, ContainerLogConfigChoice, Logging, + }; + + use super::*; + use crate::crd::Container; + + fn logging_with_automatic_config() -> Logging { + let mut containers = BTreeMap::new(); + containers.insert( + Container::Hive, + ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic( + AutomaticContainerLogConfig::default(), + )), + }, + ); + Logging { + enable_vector_agent: false, + containers, + } + } + + #[test] + fn test_validate_automatic_log_config() { + let logging = logging_with_automatic_config(); + let result = validate_logging_configuration_for_container(&logging, Container::Hive); + assert!(result.is_ok()); + assert!(matches!( + result.unwrap(), + ValidatedContainerLogConfigChoice::Automatic(_) + )); + } + + #[test] + fn test_validate_missing_container_config() { + let logging = logging_with_automatic_config(); + let result = validate_logging_configuration_for_container(&logging, Container::Vector); + assert!(result.is_err()); + } +} diff --git a/rust/operator-binary/src/framework/role_group_utils.rs b/rust/operator-binary/src/framework/role_group_utils.rs new file mode 100644 index 00000000..022a832f --- /dev/null +++ b/rust/operator-binary/src/framework/role_group_utils.rs @@ -0,0 +1,151 @@ +use std::str::FromStr; + +use super::types::{ + kubernetes::{ConfigMapName, ListenerName, ServiceName, StatefulSetName}, + operator::{ClusterName, RoleGroupName, RoleName}, +}; +use crate::attributed_string_type; + +attributed_string_type! { + QualifiedRoleGroupName, + "A qualified role group name consisting of the cluster name, role name and role-group name. 
It is a valid label name as defined in RFC 1035 that can be used e.g. as a name for a Service or a StatefulSet.", + "hive-metastore-default", + // Suffixes are added to produce resource names. According compile-time checks ensure that + // max_length cannot be set higher. + (max_length = 52), + is_rfc_1035_label_name, + is_valid_label_value +} + +/// Type-safe names for role-group resources +pub struct ResourceNames { + pub cluster_name: ClusterName, + pub role_name: RoleName, + pub role_group_name: RoleGroupName, +} + +impl ResourceNames { + /// Creates a qualified role group name in the format + /// `--` + fn qualified_role_group_name(&self) -> QualifiedRoleGroupName { + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH + + 1 // dash + + RoleName::MAX_LENGTH + + 1 // dash + + RoleGroupName::MAX_LENGTH + <= QualifiedRoleGroupName::MAX_LENGTH, + "The string `--` must not exceed the limit \ + of RFC 1035 label names." + ); + // qualified_role_group_name is only an RFC 1035 label name if it starts with an + // alphabetic character, therefore cluster_name must also be an RFC 1035 label name. + // role_name and role_group_name and the middle of the qualified_role_group_name can + // be RFC 1123 label names because digits are allowed there. + let _ = ClusterName::IS_RFC_1035_LABEL_NAME; + let _ = RoleName::IS_RFC_1123_LABEL_NAME; + let _ = RoleGroupName::IS_RFC_1123_LABEL_NAME; + + QualifiedRoleGroupName::from_str(&format!( + "{}-{}-{}", + self.cluster_name, self.role_name, self.role_group_name, + )) + .expect("should be a valid QualifiedRoleGroupName") + } + + pub fn role_group_config_map(&self) -> ConfigMapName { + // compile-time check + const _: () = assert!( + QualifiedRoleGroupName::MAX_LENGTH <= ConfigMapName::MAX_LENGTH, + "The string `--` must not exceed the limit of \ + ConfigMap names." 
+ ); + let _ = QualifiedRoleGroupName::IS_RFC_1123_SUBDOMAIN_NAME; + + ConfigMapName::from_str(self.qualified_role_group_name().as_ref()) + .expect("should be a valid ConfigMap name") + } + + pub fn stateful_set_name(&self) -> StatefulSetName { + // compile-time checks + const _: () = assert!( + QualifiedRoleGroupName::MAX_LENGTH <= StatefulSetName::MAX_LENGTH, + "The string `--` must not exceed the \ + limit of StatefulSet names." + ); + let _ = QualifiedRoleGroupName::IS_RFC_1123_LABEL_NAME; + let _ = QualifiedRoleGroupName::IS_VALID_LABEL_VALUE; + + StatefulSetName::from_str(self.qualified_role_group_name().as_ref()) + .expect("should be a valid StatefulSet name") + } + + pub fn headless_service_name(&self) -> ServiceName { + const SUFFIX: &str = "-headless"; + + const _: () = assert!( + QualifiedRoleGroupName::MAX_LENGTH + SUFFIX.len() <= ServiceName::MAX_LENGTH, + "The string `---headless` must not exceed the \ + limit of Service names." + ); + let _ = QualifiedRoleGroupName::IS_RFC_1035_LABEL_NAME; + let _ = QualifiedRoleGroupName::IS_VALID_LABEL_VALUE; + + ServiceName::from_str(&format!("{}{SUFFIX}", self.qualified_role_group_name())) + .expect("should be a valid Service name") + } + + pub fn listener_name(&self) -> ListenerName { + const _: () = assert!( + QualifiedRoleGroupName::MAX_LENGTH <= ListenerName::MAX_LENGTH, + "The string `--` must not exceed the limit of \ + Listener names." 
+ ); + let _ = QualifiedRoleGroupName::IS_RFC_1123_SUBDOMAIN_NAME; + + ListenerName::from_str(self.qualified_role_group_name().as_ref()) + .expect("should be a valid Listener name") + } +} + +#[cfg(test)] +mod tests { + use super::{ClusterName, RoleGroupName, RoleName}; + use crate::framework::{ + role_group_utils::{QualifiedRoleGroupName, ResourceNames}, + types::kubernetes::{ConfigMapName, ListenerName, ServiceName, StatefulSetName}, + }; + + #[test] + fn test_resource_names() { + QualifiedRoleGroupName::test_example(); + + let resource_names = ResourceNames { + cluster_name: ClusterName::from_str_unsafe("test-cluster"), + role_name: RoleName::from_str_unsafe("metastore"), + role_group_name: RoleGroupName::from_str_unsafe("default"), + }; + + assert_eq!( + QualifiedRoleGroupName::from_str_unsafe("test-cluster-metastore-default"), + resource_names.qualified_role_group_name() + ); + assert_eq!( + ConfigMapName::from_str_unsafe("test-cluster-metastore-default"), + resource_names.role_group_config_map() + ); + assert_eq!( + StatefulSetName::from_str_unsafe("test-cluster-metastore-default"), + resource_names.stateful_set_name() + ); + assert_eq!( + ServiceName::from_str_unsafe("test-cluster-metastore-default-headless"), + resource_names.headless_service_name() + ); + assert_eq!( + ListenerName::from_str_unsafe("test-cluster-metastore-default"), + resource_names.listener_name() + ); + } +} diff --git a/rust/operator-binary/src/framework/role_utils.rs b/rust/operator-binary/src/framework/role_utils.rs new file mode 100644 index 00000000..cb009721 --- /dev/null +++ b/rust/operator-binary/src/framework/role_utils.rs @@ -0,0 +1,368 @@ +use std::{ + collections::{BTreeMap, HashMap}, + str::FromStr, +}; + +use serde::{Deserialize, Serialize}; +use stackable_operator::{ + config::{ + fragment::{self, FromFragment}, + merge::{Merge, merge}, + }, + k8s_openapi::{DeepMerge, api::core::v1::PodTemplateSpec}, + role_utils::{CommonConfiguration, Role, RoleGroup}, + schemars::{self, 
JsonSchema}, +}; + +use super::{ + builder::pod::container::EnvVarSet, + types::{ + kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName}, + operator::{ClusterName, ProductName}, + }, +}; + +/// Variant of [`stackable_operator::role_utils::GenericProductSpecificCommonConfig`] that +/// implements [`Merge`] +#[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] +pub struct GenericProductSpecificCommonConfig {} + +impl Merge for GenericProductSpecificCommonConfig { + fn merge(&mut self, _defaults: &Self) {} +} + +/// Variant of [`stackable_operator::role_utils::RoleGroup`] that is easier to work with +/// +/// Differences are: +/// * `replicas` is non-optional. +/// * `config` is flattened. +/// * The [`HashMap`] in `env_overrides` is replaced with an [`EnvVarSet`]. +#[derive(Clone, Debug, PartialEq)] +pub struct RoleGroupConfig { + pub replicas: u16, + pub config: T, + pub config_overrides: HashMap>, + pub env_overrides: EnvVarSet, + pub cli_overrides: BTreeMap, + pub pod_overrides: PodTemplateSpec, + // allow(dead_code) is not necessary anymore when moved to operator-rs + #[allow(dead_code)] + pub product_specific_common_config: ProductSpecificCommonConfig, +} + +impl RoleGroupConfig { + pub fn cli_overrides_to_vec(&self) -> Vec { + self.cli_overrides + .clone() + .into_iter() + .flat_map(|(option, value)| [option, value]) + .collect() + } +} + +/// Variant of [`stackable_operator::role_utils::RoleGroup::validate_config`] with fixed types +/// +/// The `role` parameter takes the `ProductSpecificCommonConfig` into account. 
+pub fn validate_config( + role_group: &RoleGroup, + role: &Role, + default_config: &T, +) -> Result +where + C: FromFragment, + CommonConfig: Default + JsonSchema + Serialize, + ConfigOverrides: Default + JsonSchema + Serialize, + T: Merge + Clone, + RoleConfig: Default + JsonSchema + Serialize, +{ + let mut role_config = role.config.config.clone(); + role_config.merge(default_config); + let mut rolegroup_config = role_group.config.config.clone(); + rolegroup_config.merge(&role_config); + fragment::validate(rolegroup_config) +} + +/// Merges and validates the [`RoleGroup`] with the given `role` and `default_config` +pub fn with_validated_config( + role_group: &RoleGroup, + role: &Role, + default_config: &T, +) -> Result, fragment::ValidationError> +where + C: FromFragment, + CommonConfig: Clone + Default + JsonSchema + Merge + Serialize, + ConfigOverrides: Clone + Default + JsonSchema + Merge + Serialize, + T: Clone + Merge, + RoleConfig: Default + JsonSchema + Serialize, +{ + let validated_config = validate_config(role_group, role, default_config)?; + Ok(RoleGroup { + config: CommonConfiguration { + config: validated_config, + config_overrides: merge( + role_group.config.config_overrides.clone(), + &role.config.config_overrides, + ), + env_overrides: merged_env_overrides( + role.config.env_overrides.clone(), + role_group.config.env_overrides.clone(), + ), + cli_overrides: merged_cli_overrides( + role.config.cli_overrides.clone(), + role_group.config.cli_overrides.clone(), + ), + pod_overrides: merged_pod_overrides( + role.config.pod_overrides.clone(), + role_group.config.pod_overrides.clone(), + ), + product_specific_common_config: merge( + role_group.config.product_specific_common_config.clone(), + &role.config.product_specific_common_config, + ), + }, + replicas: role_group.replicas, + }) +} + +fn merged_env_overrides( + role_env_overrides: HashMap, + role_group_env_overrides: HashMap, +) -> HashMap { + let mut merged_env_overrides = role_env_overrides; + 
merged_env_overrides.extend(role_group_env_overrides); + merged_env_overrides +} + +fn merged_cli_overrides( + role_cli_overrides: BTreeMap, + role_group_cli_overrides: BTreeMap, +) -> BTreeMap { + let mut merged_cli_overrides = role_cli_overrides; + merged_cli_overrides.extend(role_group_cli_overrides); + merged_cli_overrides +} + +fn merged_pod_overrides( + role_pod_overrides: PodTemplateSpec, + role_group_pod_overrides: PodTemplateSpec, +) -> PodTemplateSpec { + let mut merged_pod_overrides = role_pod_overrides; + merged_pod_overrides.merge_from(role_group_pod_overrides); + merged_pod_overrides +} + +/// Type-safe names for role resources +pub struct ResourceNames { + pub cluster_name: ClusterName, + pub product_name: ProductName, +} + +impl ResourceNames { + pub fn service_account_name(&self) -> ServiceAccountName { + const SUFFIX: &str = "-serviceaccount"; + + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH + SUFFIX.len() <= ServiceAccountName::MAX_LENGTH, + "The string `-serviceaccount` must not exceed the limit of ServiceAccount names." + ); + let _ = ClusterName::IS_RFC_1123_SUBDOMAIN_NAME; + + ServiceAccountName::from_str(&format!("{}{SUFFIX}", self.cluster_name)) + .expect("should be a valid ServiceAccount name") + } + + pub fn role_binding_name(&self) -> RoleBindingName { + const SUFFIX: &str = "-rolebinding"; + + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH + SUFFIX.len() <= RoleBindingName::MAX_LENGTH, + "The string `-rolebinding` must not exceed the limit of RoleBinding names." 
+ ); + let _ = ClusterName::IS_RFC_1123_SUBDOMAIN_NAME; + + RoleBindingName::from_str(&format!("{}{SUFFIX}", self.cluster_name)) + .expect("should be a valid RoleBinding name") + } + + pub fn cluster_role_name(&self) -> ClusterRoleName { + const SUFFIX: &str = "-clusterrole"; + + // compile-time checks + const _: () = assert!( + ProductName::MAX_LENGTH + SUFFIX.len() <= ClusterRoleName::MAX_LENGTH, + "The string `-clusterrole` must not exceed the limit of cluster role names." + ); + let _ = ProductName::IS_RFC_1123_SUBDOMAIN_NAME; + + ClusterRoleName::from_str(&format!("{}{SUFFIX}", self.product_name)) + .expect("should be a valid cluster role name") + } +} + +#[cfg(test)] +mod tests { + use std::collections::{BTreeMap, HashMap}; + + use rstest::*; + use serde::Serialize; + use stackable_operator::{ + config::{fragment::Fragment, merge::Merge}, + k8s_openapi::api::core::v1::PodTemplateSpec, + kube::api::ObjectMeta, + role_utils::{CommonConfiguration, GenericRoleConfig, Role, RoleGroup}, + schemars::{self, JsonSchema}, + }; + + use super::ResourceNames; + use crate::framework::{ + role_utils::with_validated_config, + types::{ + kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName}, + operator::{ClusterName, ProductName}, + }, + }; + + #[derive(Debug, Fragment, PartialEq)] + #[fragment_attrs(derive(Clone, Debug, Default, Merge, PartialEq))] + struct Config { + property: String, + } + + impl Config { + fn new(value: &str) -> Self { + Self { + property: value.to_owned(), + } + } + } + + impl ConfigFragment { + fn new(value: Option<&str>) -> Self { + Self { + property: value.map(str::to_owned), + } + } + } + + #[derive(Clone, Debug, Default, JsonSchema, Merge, PartialEq, Serialize)] + struct ProductCommonConfig { + property: Option, + } + + #[derive(Clone, Debug, Default, JsonSchema, Merge, PartialEq, Serialize)] + struct TestConfigOverrides { + property: Option, + } + + fn new_common_config( + config: T, + override_value: Option<&str>, + ) -> 
CommonConfiguration { + let mut env_overrides = HashMap::new(); + let mut cli_overrides = BTreeMap::new(); + + if let Some(value) = override_value { + env_overrides.insert("PROPERTY".to_owned(), value.to_owned()); + cli_overrides.insert("--property".to_owned(), value.to_owned()); + } + + CommonConfiguration { + config, + config_overrides: TestConfigOverrides { + property: override_value.map(str::to_owned), + }, + env_overrides, + cli_overrides, + pod_overrides: PodTemplateSpec { + metadata: Some(ObjectMeta { + name: override_value.map(str::to_owned), + ..ObjectMeta::default() + }), + ..PodTemplateSpec::default() + }, + product_specific_common_config: ProductCommonConfig { + property: override_value.map(str::to_owned), + }, + } + } + + #[rstest] + #[case("role-group", Some("role-group"), Some("role-group"), Some("role"), Some("default"))] + #[case("role-group", Some("role-group"), Some("role-group"), Some("role"), None)] + #[case("role-group", Some("role-group"), Some("role-group"), None, Some("default"))] + #[case("role-group", Some("role-group"), Some("role-group"), None, None)] + #[case("role", Some("role"), None, Some("role"), Some("default"))] + #[case("role", Some("role"), None, Some("role"), None)] + #[case("default", None, None, None, Some("default"))] + fn test_with_validated_config_and_result_ok( + #[case] expected_config_value: &str, + #[case] expected_override_value: Option<&str>, + #[case] role_group_value: Option<&str>, + #[case] role_value: Option<&str>, + #[case] default_value: Option<&str>, + ) { + let role_group = RoleGroup { + config: new_common_config(ConfigFragment::new(role_group_value), role_group_value), + replicas: Some(3), + }; + let role = Role::<_, _, GenericRoleConfig, _> { + config: new_common_config(ConfigFragment::new(role_value), role_value), + ..Role::default() + }; + let default_config = ConfigFragment::new(default_value); + + let result = with_validated_config(&role_group, &role, &default_config); + + assert_eq!( + Some(RoleGroup 
{ + config: new_common_config( + Config::new(expected_config_value), + expected_override_value + ), + replicas: Some(3) + }), + result.ok() + ) + } + + #[test] + fn test_with_validated_config_and_result_err() { + let role_group = RoleGroup { + config: new_common_config(ConfigFragment::new(None), None), + replicas: None, + }; + let role = Role::<_, _, GenericRoleConfig, _> { + config: new_common_config(ConfigFragment::new(None), None), + ..Role::default() + }; + let default_config = ConfigFragment::new(None); + + let result: Result, _> = + with_validated_config(&role_group, &role, &default_config); + + assert!(result.is_err()) + } + + #[test] + fn test_resource_names() { + let resource_names = ResourceNames { + cluster_name: ClusterName::from_str_unsafe("my-cluster"), + product_name: ProductName::from_str_unsafe("my-product"), + }; + + assert_eq!( + ServiceAccountName::from_str_unsafe("my-cluster-serviceaccount"), + resource_names.service_account_name() + ); + assert_eq!( + RoleBindingName::from_str_unsafe("my-cluster-rolebinding"), + resource_names.role_binding_name() + ); + assert_eq!( + ClusterRoleName::from_str_unsafe("my-product-clusterrole"), + resource_names.cluster_role_name() + ); + } +} From 60a87ce0282441715007c09e121e0a1a4fd13b13 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Fri, 8 May 2026 18:43:48 +0200 Subject: [PATCH 3/3] feat: rewrite controller pipeline with validated config types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the monolithic controller with a structured pipeline of validate → build → apply → update_status stages. Each stage operates on validated, type-safe data rather than raw CRD types. Remove discovery.rs, operations/, and service.rs modules whose logic has been absorbed into the new pipeline. Trim listener.rs and kerberos.rs to their shared helpers now that pod-building logic lives in controller/build.rs. 
Co-Authored-By: Claude Opus 4.6 --- rust/operator-binary/src/controller.rs | 1330 +++-------------- rust/operator-binary/src/controller/apply.rs | 246 +++ rust/operator-binary/src/controller/build.rs | 847 +++++++++++ .../src/controller/dereference.rs | 96 ++ .../src/controller/update_status.rs | 47 + .../src/controller/validate.rs | 976 ++++++++++++ rust/operator-binary/src/crd/mod.rs | 54 +- rust/operator-binary/src/discovery.rs | 113 -- rust/operator-binary/src/framework.rs | 5 - rust/operator-binary/src/framework/builder.rs | 2 - .../src/framework/builder/pdb.rs | 28 +- .../src/framework/kvp/label.rs | 19 +- .../src/framework/role_utils.rs | 26 +- rust/operator-binary/src/kerberos.rs | 68 +- rust/operator-binary/src/listener.rs | 66 +- rust/operator-binary/src/main.rs | 5 +- .../src/operations/graceful_shutdown.rs | 27 - rust/operator-binary/src/operations/mod.rs | 2 - rust/operator-binary/src/operations/pdb.rs | 63 - rust/operator-binary/src/product_logging.rs | 21 +- rust/operator-binary/src/service.rs | 148 -- 21 files changed, 2512 insertions(+), 1677 deletions(-) create mode 100644 rust/operator-binary/src/controller/apply.rs create mode 100644 rust/operator-binary/src/controller/build.rs create mode 100644 rust/operator-binary/src/controller/dereference.rs create mode 100644 rust/operator-binary/src/controller/update_status.rs create mode 100644 rust/operator-binary/src/controller/validate.rs delete mode 100644 rust/operator-binary/src/discovery.rs delete mode 100644 rust/operator-binary/src/operations/graceful_shutdown.rs delete mode 100644 rust/operator-binary/src/operations/mod.rs delete mode 100644 rust/operator-binary/src/operations/pdb.rs delete mode 100644 rust/operator-binary/src/service.rs diff --git a/rust/operator-binary/src/controller.rs b/rust/operator-binary/src/controller.rs index ade37ef0..9dd9393d 100644 --- a/rust/operator-binary/src/controller.rs +++ b/rust/operator-binary/src/controller.rs @@ -1,345 +1,263 @@ -//! 
Ensures that `Pod`s are configured and running for each [`v1alpha1::HiveCluster`] - -use std::{ - borrow::Cow, - collections::{BTreeMap, HashMap}, - hash::Hasher, - sync::Arc, -}; +use std::{collections::BTreeMap, marker::PhantomData, sync::Arc}; use const_format::concatcp; -use fnv::FnvHasher; -use indoc::formatdoc; -use product_config::{ - ProductConfigManager, - types::PropertyNameKind, - writer::{PropertiesWriterError, to_hadoop_xml, to_java_properties_string}, -}; -use snafu::{OptionExt, ResultExt, Snafu}; +use product_config::ProductConfigManager; +use snafu::{ResultExt, Snafu}; use stackable_operator::{ - builder::{ - self, - configmap::ConfigMapBuilder, - meta::ObjectMetaBuilder, - pod::{ - PodBuilder, - container::ContainerBuilder, - resources::ResourceRequirementsBuilder, - security::PodSecurityContextBuilder, - volume::{ - ListenerOperatorVolumeSourceBuilder, ListenerOperatorVolumeSourceBuilderError, - ListenerReference, SecretOperatorVolumeSourceBuilder, VolumeBuilder, - }, - }, - }, cli::OperatorEnvironmentOptions, cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, commons::{ - product_image_selection::{self, ResolvedProductImage}, - rbac::build_rbac_resources, - secret_class::SecretClassVolumeProvisionParts, - }, - constants::RESTART_CONTROLLER_ENABLED_LABEL, - crd::{listener::v1alpha1::Listener, s3}, - database_connections::drivers::jdbc::JdbcDatabaseConnectionDetails, - k8s_openapi::{ - DeepMerge, - api::{ - apps::v1::{StatefulSet, StatefulSetSpec}, - core::v1::{ - ConfigMap, ConfigMapVolumeSource, EmptyDirVolumeSource, EnvVar, Probe, - TCPSocketAction, Volume, - }, - }, - apimachinery::pkg::{ - api::resource::Quantity, apis::meta::v1::LabelSelector, util::intstr::IntOrString, + affinity::StackableAffinity, + product_image_selection::ResolvedProductImage, + resources::{NoRuntimeLimits, Resources}, + }, + crd::listener, + k8s_openapi::api::{ + apps::v1::StatefulSet, + core::v1::{ + ConfigMap, Container as K8sContainer, EnvVar, 
PersistentVolumeClaim, PodTemplateSpec, + Service, ServiceAccount, Volume, VolumeMount, }, + policy::v1::PodDisruptionBudget, + rbac::v1::RoleBinding, }, kube::{ - Resource, ResourceExt, + Resource, + api::ObjectMeta, core::{DeserializeGuard, error_boundary}, runtime::controller::Action, }, - kvp::{Labels, ObjectLabels}, logging::controller::ReconcilerError, - memory::{BinaryMultiple, MemoryQuantity}, - product_config_utils::{transform_all_roles_to_config, validate_all_roles_and_groups_config}, - product_logging::{ - self, - framework::{ - LoggingError, create_vector_shutdown_file_command, remove_vector_shutdown_file_command, - }, - spec::{ - ConfigMapLogConfig, ContainerLogConfig, ContainerLogConfigChoice, - CustomContainerLogConfig, - }, - }, - role_utils::{GenericRoleConfig, RoleGroupRef}, shared::time::Duration, - status::condition::{ - compute_conditions, operations::ClusterOperationsConditionBuilder, - statefulset::StatefulSetConditionBuilder, - }, - utils::{COMMON_BASH_TRAP_FUNCTIONS, cluster_info::KubernetesClusterInfo}, }; -use strum::EnumDiscriminants; -use tracing::warn; +use strum::{EnumDiscriminants, IntoStaticStr}; use crate::{ OPERATOR_NAME, - command::build_container_command_args, - config::{ - jvm::{construct_hadoop_heapsize_env, construct_non_heap_jvm_args}, - opa::{HiveOpaConfig, OPA_TLS_VOLUME_NAME}, - }, - crd::{ - APP_NAME, CORE_SITE_XML, Container, HIVE_PORT, HIVE_PORT_NAME, HIVE_SITE_XML, - HiveClusterStatus, HiveRole, JVM_SECURITY_PROPERTIES_FILE, METRICS_PORT, METRICS_PORT_NAME, - MetaStoreConfig, STACKABLE_CONFIG_DIR, STACKABLE_CONFIG_DIR_NAME, - STACKABLE_CONFIG_MOUNT_DIR, STACKABLE_CONFIG_MOUNT_DIR_NAME, - STACKABLE_LOG_CONFIG_MOUNT_DIR, STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME, STACKABLE_LOG_DIR, - STACKABLE_LOG_DIR_NAME, - databases::{MetadataDatabaseConnection, derby_driver_class}, - v1alpha1::{self, HiveMetastoreRoleConfig}, - }, - discovery::{self}, - kerberos::{ - self, add_kerberos_pod_config, kerberos_config_properties, - 
kerberos_container_start_commands, + crd::{APP_NAME, HiveRole, MetastoreStorageConfig, v1alpha1}, + framework::{ + HasName, HasUid, NameIsValidLabelValue, + product_logging::framework::{ValidatedContainerLogConfigChoice, VectorContainerLogConfig}, + types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, + }, }, - listener::{LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME, build_role_listener}, - operations::{graceful_shutdown::add_graceful_shutdown_config, pdb::add_pdbs}, - product_logging::extend_role_group_config_map, - service::{build_rolegroup_headless_service, build_rolegroup_metrics_service}, }; +pub mod apply; +pub mod build; +pub mod dereference; +pub mod update_status; +pub mod validate; + pub const HIVE_CONTROLLER_NAME: &str = "hivecluster"; +pub const CONTAINER_IMAGE_BASE_NAME: &str = "hive"; pub const HIVE_FULL_CONTROLLER_NAME: &str = concatcp!(HIVE_CONTROLLER_NAME, '.', OPERATOR_NAME); -const CONTAINER_IMAGE_BASE_NAME: &str = "hive"; - -pub const MAX_HIVE_LOG_FILES_SIZE: MemoryQuantity = MemoryQuantity { - value: 10.0, - unit: BinaryMultiple::Mebi, -}; - pub struct Ctx { pub client: stackable_operator::client::Client, pub product_config: ProductConfigManager, pub operator_environment: OperatorEnvironmentOptions, } -#[derive(Snafu, Debug, EnumDiscriminants)] -#[strum_discriminants(derive(strum::IntoStaticStr))] -#[allow(clippy::enum_variant_names)] -pub enum Error { - #[snafu(display("object defines no namespace"))] - ObjectHasNoNamespace, - - #[snafu(display("object defines no metastore role"))] - NoMetaStoreRole, - - #[snafu(display("failed to apply Service for {rolegroup}"))] - ApplyRoleGroupService { - source: stackable_operator::cluster_resources::Error, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("failed to build ConfigMap for {rolegroup}"))] - BuildRoleGroupConfig { - source: stackable_operator::builder::configmap::Error, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("failed to apply ConfigMap for {rolegroup}"))] - 
ApplyRoleGroupConfig { - source: stackable_operator::cluster_resources::Error, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("failed to apply StatefulSet for {rolegroup}"))] - ApplyRoleGroupStatefulSet { - source: stackable_operator::cluster_resources::Error, - rolegroup: RoleGroupRef, - }, - - #[snafu(display("failed to generate product config"))] - GenerateProductConfig { - source: stackable_operator::product_config_utils::Error, - }, - - #[snafu(display("invalid product config"))] - InvalidProductConfig { - source: stackable_operator::product_config_utils::Error, - }, - - #[snafu(display("object is missing metadata to build owner reference"))] - ObjectMissingMetadataForOwnerRef { - source: stackable_operator::builder::meta::Error, - }, - - #[snafu(display("failed to build discovery ConfigMap"))] - BuildDiscoveryConfig { source: discovery::Error }, - - #[snafu(display("failed to apply discovery ConfigMap"))] - ApplyDiscoveryConfig { - source: stackable_operator::cluster_resources::Error, - }, - - #[snafu(display("failed to update status"))] - ApplyStatus { - source: stackable_operator::client::Error, - }, - - #[snafu(display("failed to configure S3 connection"))] - ConfigureS3Connection { - source: stackable_operator::crd::s3::v1alpha1::ConnectionError, - }, - - #[snafu(display( - "Hive does not support skipping the verification of the tls enabled S3 server" - ))] - S3TlsNoVerificationNotSupported, - - #[snafu(display("failed to resolve and merge resource config for role and role group"))] - FailedToResolveResourceConfig { source: crate::crd::Error }, +pub(crate) struct Prepared; +pub(crate) struct Applied; + +pub(crate) struct KubernetesResources { + pub stateful_sets: Vec, + pub config_maps: Vec, + pub services: Vec, + pub service_accounts: Vec, + pub role_bindings: Vec, + pub pod_disruption_budgets: Vec, + pub listeners: Vec, + pub discovery_hash: Option, + pub _status: PhantomData, +} - #[snafu(display("failed to create hive container [{name}]"))] - 
FailedToCreateHiveContainer { - source: stackable_operator::builder::pod::container::Error, - name: String, - }, +#[derive(Clone, Debug)] +pub struct ValidatedRoleGroupConfig { + pub resources: Resources, + pub logging: ValidatedLogging, + pub affinity: StackableAffinity, + pub graceful_shutdown_timeout: Duration, + pub hive_site_xml_content: String, + pub jvm_security_properties_content: String, + pub core_site_xml_content: Option, +} - #[snafu(display("failed to create cluster resources"))] - CreateClusterResources { - source: stackable_operator::cluster_resources::Error, - }, +#[derive(Clone, Debug)] +pub struct ValidatedRoleConfig { + pub pdb_enabled: bool, + pub pdb_max_unavailable: u16, + pub listener_class: String, + pub listener_name: String, +} - #[snafu(display("failed to delete orphaned resources"))] - DeleteOrphanedResources { - source: stackable_operator::cluster_resources::Error, - }, +#[derive(Clone)] +pub struct PrecomputedPodData { + pub env_vars: Vec, + pub commands: Vec, + pub kerberos_volumes: Vec, + pub kerberos_volume_mounts: Vec, + pub s3_volumes: Vec, + pub s3_volume_mounts: Vec, + pub hdfs_volumes: Vec, + pub hdfs_volume_mounts: Vec, + pub opa_volumes: Vec, + pub opa_volume_mounts: Vec, + pub vector_container: Option, + pub service_account_name: String, + pub replicas: Option, + pub pod_overrides: PodTemplateSpec, + pub listener_volume_claim_template: PersistentVolumeClaim, +} - #[snafu(display("vector agent is enabled but vector aggregator ConfigMap is missing"))] - VectorAggregatorConfigMapMissing, +#[derive(Clone, Debug)] +pub struct ValidatedLogging { + pub hive_container: ValidatedContainerLogConfigChoice, + pub vector_container: Option, +} - #[snafu(display("failed to add the logging configuration to the ConfigMap [{cm_name}]"))] - InvalidLoggingConfig { - source: crate::product_logging::Error, - cm_name: String, - }, +impl ValidatedLogging { + pub fn is_vector_agent_enabled(&self) -> bool { + self.vector_container.is_some() + } +} - 
#[snafu(display("failed to patch service account"))] - ApplyServiceAccount { - source: stackable_operator::cluster_resources::Error, - }, +#[derive(Clone)] +pub struct ValidatedHiveCluster { + metadata: ObjectMeta, + pub image: ResolvedProductImage, + pub name: ClusterName, + pub namespace: NamespaceName, + pub uid: Uid, + pub role_groups: BTreeMap>, + pub precomputed_pod_data: BTreeMap>, + pub role_configs: BTreeMap, +} - #[snafu(display("failed to patch role binding"))] - ApplyRoleBinding { - source: stackable_operator::cluster_resources::Error, - }, +impl ValidatedHiveCluster { + #[allow(clippy::too_many_arguments)] + pub fn new( + image: ResolvedProductImage, + name: ClusterName, + namespace: NamespaceName, + uid: Uid, + role_groups: BTreeMap>, + precomputed_pod_data: BTreeMap>, + role_configs: BTreeMap, + ) -> Self { + Self { + metadata: ObjectMeta { + name: Some(name.to_string()), + namespace: Some(namespace.to_string()), + uid: Some(uid.to_string()), + ..ObjectMeta::default() + }, + image, + name, + namespace, + uid, + role_groups, + precomputed_pod_data, + role_configs, + } + } +} - #[snafu(display("failed to build RBAC resources"))] - BuildRbacResources { - source: stackable_operator::commons::rbac::Error, - }, +impl HasName for ValidatedHiveCluster { + fn to_name(&self) -> String { + self.name.to_string() + } +} - #[snafu(display("internal operator failure"))] - InternalOperatorFailure { source: crate::crd::Error }, +impl HasUid for ValidatedHiveCluster { + fn to_uid(&self) -> Uid { + self.uid.clone() + } +} - #[snafu(display( - "failed to serialize [{JVM_SECURITY_PROPERTIES_FILE}] for {}", - rolegroup - ))] - JvmSecurityPoperties { - source: PropertiesWriterError, - rolegroup: String, - }, +impl NameIsValidLabelValue for ValidatedHiveCluster { + fn to_label_value(&self) -> String { + self.name.to_label_value() + } +} - #[snafu(display("failed to create PodDisruptionBudget"))] - FailedToCreatePdb { - source: crate::operations::pdb::Error, - }, +impl 
ValidatedHiveCluster { + pub fn rolegroup_ref( + &self, + role: &HiveRole, + role_group: &str, + ) -> stackable_operator::role_utils::RoleGroupRef { + stackable_operator::role_utils::RoleGroupRef { + cluster: stackable_operator::kube::runtime::reflector::ObjectRef::from_obj(self), + role: role.to_string(), + role_group: role_group.to_string(), + } + } +} - #[snafu(display("failed to configure graceful shutdown"))] - GracefulShutdown { - source: crate::operations::graceful_shutdown::Error, - }, +impl Resource for ValidatedHiveCluster { + type DynamicType = ::DynamicType; + type Scope = ::Scope; - #[snafu(display("failed to build Labels"))] - LabelBuild { - source: stackable_operator::kvp::LabelError, - }, + fn kind(dt: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + v1alpha1::HiveCluster::kind(dt) + } - #[snafu(display("failed to build Metadata"))] - MetadataBuild { - source: stackable_operator::builder::meta::Error, - }, + fn group(dt: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + v1alpha1::HiveCluster::group(dt) + } - #[snafu(display("failed to get required Labels"))] - GetRequiredLabels { - source: - stackable_operator::kvp::KeyValuePairError, - }, + fn version(dt: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + v1alpha1::HiveCluster::version(dt) + } - #[snafu(display("failed to add kerberos config"))] - AddKerberosConfig { source: kerberos::Error }, + fn plural(dt: &Self::DynamicType) -> std::borrow::Cow<'_, str> { + v1alpha1::HiveCluster::plural(dt) + } - #[snafu(display("failed to build vector container"))] - BuildVectorContainer { source: LoggingError }, + fn meta(&self) -> &ObjectMeta { + &self.metadata + } - #[snafu(display("failed to add needed volume"))] - AddVolume { source: builder::pod::Error }, + fn meta_mut(&mut self) -> &mut ObjectMeta { + &mut self.metadata + } +} - #[snafu(display("failed to add needed volumeMount"))] - AddVolumeMount { - source: builder::pod::container::Error, - }, +// 
--------------------------------------------------------------------------- +// Reconcile +// --------------------------------------------------------------------------- +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { #[snafu(display("HiveCluster object is invalid"))] InvalidHiveCluster { source: error_boundary::InvalidObject, }, - #[snafu(display("failed to construct JVM arguments"))] - ConstructJvmArguments { source: crate::config::jvm::Error }, - - #[snafu(display("failed to apply group listener for {role}"))] - ApplyGroupListener { - source: stackable_operator::cluster_resources::Error, - role: String, - }, - #[snafu(display("failed to configure listener"))] - ListenerConfiguration { source: crate::listener::Error }, - - #[snafu(display("failed to build listener volume"))] - BuildListenerVolume { - source: ListenerOperatorVolumeSourceBuilderError, - }, - - #[snafu(display("failed to configure service"))] - ServiceConfiguration { source: crate::service::Error }, + #[snafu(display("failed to dereference resources"))] + Dereference { source: dereference::Error }, - #[snafu(display("failed to resolve product image"))] - ResolveProductImage { - source: product_image_selection::Error, - }, + #[snafu(display("failed to validate cluster"))] + Validate { source: validate::Error }, - #[snafu(display("invalid OpaConfig"))] - InvalidOpaConfig { - source: stackable_operator::commons::opa::Error, + #[snafu(display("failed to create cluster resources"))] + CreateClusterResources { + source: stackable_operator::cluster_resources::Error, }, - #[snafu(display("failed to build TLS certificate SecretClass Volume"))] - TlsCertSecretClassVolumeBuild { - source: stackable_operator::builder::pod::volume::SecretOperatorVolumeSourceBuilderError, - }, + #[snafu(display("failed to apply resources"))] + Apply { source: apply::Error }, - #[snafu(display("invalid metadata database connection"))] - InvalidMetadataDatabaseConnection 
{ - source: stackable_operator::database_connections::Error, - }, + #[snafu(display("failed to update status"))] + UpdateStatus { source: update_status::Error }, } + type Result = std::result::Result; impl ReconcilerError for Error { @@ -348,84 +266,38 @@ impl ReconcilerError for Error { } } -pub async fn reconcile_hive( +pub async fn reconcile( hive: Arc>, ctx: Arc, ) -> Result { tracing::info!("Starting reconcile"); + let hive = hive .0 .as_ref() .map_err(error_boundary::InvalidObject::clone) .context(InvalidHiveClusterSnafu)?; - let client = &ctx.client; - let hive_namespace = hive.namespace().context(ObjectHasNoNamespaceSnafu)?; - - let resolved_product_image = hive - .spec - .image - .resolve( - CONTAINER_IMAGE_BASE_NAME, - &ctx.operator_environment.image_repository, - crate::built_info::PKG_VERSION, - ) - .context(ResolveProductImageSnafu)?; - let role = hive.spec.metastore.as_ref().context(NoMetaStoreRoleSnafu)?; - let hive_role = HiveRole::MetaStore; - - let s3_connection_spec: Option = - if let Some(s3) = &hive.spec.cluster_config.s3 { - Some( - s3.clone() - .resolve( - client, - &hive.namespace().ok_or(Error::ObjectHasNoNamespace)?, - ) - .await - .context(ConfigureS3ConnectionSnafu)?, - ) - } else { - None - }; - - let metadata_database_connection_details = hive - .spec - .cluster_config - .metadata_database - .jdbc_connection_details("METADATA") - .context(InvalidMetadataDatabaseConnectionSnafu)?; - let validated_config = validate_all_roles_and_groups_config( - &resolved_product_image.product_version, - &transform_all_roles_to_config( - hive, - &[( - HiveRole::MetaStore.to_string(), - ( - vec![ - PropertyNameKind::Env, - PropertyNameKind::Cli, - PropertyNameKind::File(HIVE_SITE_XML.to_string()), - PropertyNameKind::File(JVM_SECURITY_PROPERTIES_FILE.to_string()), - ], - role.clone(), - ), - )] - .into(), - ) - .context(GenerateProductConfigSnafu)?, - &ctx.product_config, - false, - false, + // --- dereference (async, fallible) --- + let dereferenced = 
dereference::dereference( + &ctx.client, + hive, + CONTAINER_IMAGE_BASE_NAME, + &ctx.operator_environment.image_repository, + crate::built_info::PKG_VERSION, ) - .context(InvalidProductConfigSnafu)?; + .await + .context(DereferenceSnafu)?; + + // --- validate (sync, fallible) --- + let validated = validate::validate_cluster(hive, &dereferenced, &ctx.product_config) + .context(ValidateSnafu)?; - let metastore_config = validated_config - .get(&HiveRole::MetaStore.to_string()) - .map(Cow::Borrowed) - .unwrap_or_default(); + // --- build (sync, infallible) --- + let prepared = build::build(&validated); - let mut cluster_resources = ClusterResources::new( + // --- apply (async, fallible) --- + let cluster_resources = ClusterResources::new( APP_NAME, OPERATOR_NAME, HIVE_CONTROLLER_NAME, @@ -435,778 +307,26 @@ pub async fn reconcile_hive( ) .context(CreateClusterResourcesSnafu)?; - let (rbac_sa, rbac_rolebinding) = build_rbac_resources( - hive, - APP_NAME, - cluster_resources - .get_required_labels() - .context(GetRequiredLabelsSnafu)?, - ) - .context(BuildRbacResourcesSnafu)?; - - let rbac_sa = cluster_resources - .add(client, rbac_sa) - .await - .context(ApplyServiceAccountSnafu)?; - - cluster_resources - .add(client, rbac_rolebinding) - .await - .context(ApplyRoleBindingSnafu)?; - - let hive_opa_config = match hive.get_opa_config() { - Some(opa_config) => Some( - HiveOpaConfig::from_opa_config(client, hive, opa_config) - .await - .context(InvalidOpaConfigSnafu)?, - ), - None => None, - }; - - let mut ss_cond_builder = StatefulSetConditionBuilder::default(); - - for (rolegroup_name, rolegroup_config) in metastore_config.iter() { - let rolegroup = hive.metastore_rolegroup_ref(rolegroup_name); - - let config = hive - .merged_config(&HiveRole::MetaStore, &rolegroup) - .context(FailedToResolveResourceConfigSnafu)?; - - let rg_metrics_service = - build_rolegroup_metrics_service(hive, &resolved_product_image, &rolegroup) - .context(ServiceConfigurationSnafu)?; - - let 
rg_headless_service = - build_rolegroup_headless_service(hive, &resolved_product_image, &rolegroup) - .context(ServiceConfigurationSnafu)?; - - let rg_configmap = build_metastore_rolegroup_config_map( - hive, - &hive_namespace, - &resolved_product_image, - &rolegroup, - rolegroup_config, - &metadata_database_connection_details, - s3_connection_spec.as_ref(), - &config, - &client.kubernetes_cluster_info, - hive_opa_config.as_ref(), - )?; - let rg_statefulset = build_metastore_rolegroup_statefulset( - hive, - &hive_role, - &resolved_product_image, - &rolegroup, - rolegroup_config, - &metadata_database_connection_details, - s3_connection_spec.as_ref(), - &config, - &rbac_sa.name_any(), - hive_opa_config.as_ref(), - )?; - - cluster_resources - .add(client, rg_metrics_service) - .await - .context(ApplyRoleGroupServiceSnafu { - rolegroup: rolegroup.clone(), - })?; - - cluster_resources - .add(client, rg_headless_service) - .await - .context(ApplyRoleGroupServiceSnafu { - rolegroup: rolegroup.clone(), - })?; - - cluster_resources - .add(client, rg_configmap) - .await - .context(ApplyRoleGroupConfigSnafu { - rolegroup: rolegroup.clone(), - })?; - - // Note: The StatefulSet needs to be applied after all ConfigMaps and Secrets it mounts - // to prevent unnecessary Pod restarts. - // See https://github.com/stackabletech/commons-operator/issues/111 for details. - ss_cond_builder.add( - cluster_resources - .add(client, rg_statefulset) - .await - .context(ApplyRoleGroupStatefulSetSnafu { - rolegroup: rolegroup.clone(), - })?, - ); - } - - let role_config = hive.role_config(&hive_role); - if let Some(HiveMetastoreRoleConfig { - common: GenericRoleConfig { - pod_disruption_budget: pdb, - }, - .. - }) = role_config - { - add_pdbs(pdb, hive, &hive_role, client, &mut cluster_resources) - .await - .context(FailedToCreatePdbSnafu)?; - } - - // std's SipHasher is deprecated, and DefaultHasher is unstable across Rust releases. 
- // We don't /need/ stability, but it's still nice to avoid spurious changes where possible. - let mut discovery_hash = FnvHasher::with_key(0); - - if let Some(HiveMetastoreRoleConfig { listener_class, .. }) = role_config { - let role_listener: Listener = - build_role_listener(hive, &resolved_product_image, &hive_role, listener_class) - .context(ListenerConfigurationSnafu)?; - let listener = cluster_resources.add(client, role_listener).await.context( - ApplyGroupListenerSnafu { - role: hive_role.to_string(), - }, - )?; - - for discovery_cm in discovery::build_discovery_configmaps( - hive, - hive, - hive_role, - &resolved_product_image, - None, - listener, - ) + let applied = apply::Applier::new(&ctx.client, cluster_resources) + .apply(prepared, &validated) .await - .context(BuildDiscoveryConfigSnafu)? - { - let discovery_cm = cluster_resources - .add(client, discovery_cm) - .await - .context(ApplyDiscoveryConfigSnafu)?; - if let Some(generation) = discovery_cm.metadata.resource_version { - discovery_hash.write(generation.as_bytes()) - } - } - } - - let cluster_operation_cond_builder = - ClusterOperationsConditionBuilder::new(&hive.spec.cluster_operation); - - let status = HiveClusterStatus { - // Serialize as a string to discourage users from trying to parse the value, - // and to keep things flexible if we end up changing the hasher at some point. 
- discovery_hash: Some(discovery_hash.finish().to_string()), - conditions: compute_conditions(hive, &[&ss_cond_builder, &cluster_operation_cond_builder]), - }; + .context(ApplySnafu)?; - client - .apply_patch_status(OPERATOR_NAME, hive, &status) + // --- update status (async, fallible) --- + update_status::update_status(&ctx.client, hive, applied) .await - .context(ApplyStatusSnafu)?; - - cluster_resources - .delete_orphaned_resources(client) - .await - .context(DeleteOrphanedResourcesSnafu)?; + .context(UpdateStatusSnafu)?; Ok(Action::await_change()) } -/// The rolegroup [`ConfigMap`] configures the rolegroup based on the configuration given by the administrator -#[allow(clippy::too_many_arguments)] -fn build_metastore_rolegroup_config_map( - hive: &v1alpha1::HiveCluster, - hive_namespace: &str, - resolved_product_image: &ResolvedProductImage, - rolegroup: &RoleGroupRef, - role_group_config: &HashMap>, - database_connection_details: &JdbcDatabaseConnectionDetails, - s3_connection_spec: Option<&s3::v1alpha1::ConnectionSpec>, - merged_config: &MetaStoreConfig, - cluster_info: &KubernetesClusterInfo, - hive_opa_config: Option<&HiveOpaConfig>, -) -> Result { - let mut hive_site_data = String::new(); - - for (property_name_kind, config) in role_group_config { - match property_name_kind { - PropertyNameKind::File(file_name) if file_name == HIVE_SITE_XML => { - let mut data = BTreeMap::new(); - - data.insert( - MetaStoreConfig::METASTORE_WAREHOUSE_DIR.to_string(), - Some("/stackable/warehouse".to_string()), - ); - - // The Derby driver class needs some special handling - let driver = match &hive.spec.cluster_config.metadata_database { - MetadataDatabaseConnection::Derby(_) => { - derby_driver_class(&resolved_product_image.product_version) - } - _ => database_connection_details.driver.as_str(), - }; - data.insert( - MetaStoreConfig::CONNECTION_DRIVER_NAME.to_string(), - Some(driver.to_owned()), - ); - data.insert( - MetaStoreConfig::CONNECTION_URL.to_string(), - 
Some(database_connection_details.connection_url.to_string()), - ); - if let Some(EnvVar { - name: username_env_name, - .. - }) = &database_connection_details.username_env - { - data.insert( - MetaStoreConfig::CONNECTION_USER_NAME.to_string(), - Some(format!("${{env:{username_env_name}}}",)), - ); - } - if let Some(EnvVar { - name: password_env_name, - .. - }) = &database_connection_details.password_env - { - data.insert( - MetaStoreConfig::CONNECTION_PASSWORD.to_string(), - Some(format!("${{env:{password_env_name}}}",)), - ); - } - - if let Some(s3) = s3_connection_spec { - data.insert( - MetaStoreConfig::S3_ENDPOINT.to_string(), - Some( - s3.endpoint() - .context(ConfigureS3ConnectionSnafu)? - .to_string(), - ), - ); - - data.insert( - MetaStoreConfig::S3_REGION_NAME.to_string(), - Some(s3.region.name.clone()), - ); - - if let Some((access_key_file, secret_key_file)) = s3.credentials_mount_paths() { - // Will be replaced by config-utils - data.insert( - MetaStoreConfig::S3_ACCESS_KEY.to_string(), - Some(format!("${{file:UTF-8:{access_key_file}}}")), - ); - data.insert( - MetaStoreConfig::S3_SECRET_KEY.to_string(), - Some(format!("${{file:UTF-8:{secret_key_file}}}")), - ); - } - - data.insert( - MetaStoreConfig::S3_SSL_ENABLED.to_string(), - Some(s3.tls.uses_tls().to_string()), - ); - data.insert( - MetaStoreConfig::S3_PATH_STYLE_ACCESS.to_string(), - Some((s3.access_style == s3::v1alpha1::S3AccessStyle::Path).to_string()), - ); - } - - for (property_name, property_value) in - kerberos_config_properties(hive, hive_namespace, cluster_info) - { - data.insert(property_name.to_string(), Some(property_value.to_string())); - } - - // OPA settings - if let Some(opa_config) = hive_opa_config { - data.extend( - opa_config - .as_config(&resolved_product_image.product_version) - .into_iter() - .map(|(k, v)| (k, Some(v))) - .collect::>>(), - ); - } - - // overrides - for (property_name, property_value) in config { - data.insert(property_name.to_string(), 
Some(property_value.to_string())); - } - - hive_site_data = to_hadoop_xml(data.iter()); - } - _ => {} - } - } - - let jvm_sec_props: BTreeMap> = role_group_config - .get(&PropertyNameKind::File( - JVM_SECURITY_PROPERTIES_FILE.to_string(), - )) - .cloned() - .unwrap_or_default() - .into_iter() - .map(|(k, v)| (k, Some(v))) - .collect(); - - let mut cm_builder = ConfigMapBuilder::new(); - - cm_builder - .metadata( - ObjectMetaBuilder::new() - .name_and_namespace(hive) - .name(rolegroup.object_name()) - .ownerreference_from_resource(hive, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&build_recommended_labels( - hive, - &resolved_product_image.app_version_label_value, - &rolegroup.role, - &rolegroup.role_group, - )) - .context(MetadataBuildSnafu)? - .build(), - ) - .add_data(HIVE_SITE_XML, hive_site_data) - .add_data( - JVM_SECURITY_PROPERTIES_FILE, - to_java_properties_string(jvm_sec_props.iter()).with_context(|_| { - JvmSecurityPopertiesSnafu { - rolegroup: rolegroup.role_group.clone(), - } - })?, - ); - - if hive.has_kerberos_enabled() && hive.spec.cluster_config.hdfs.is_none() { - // if kerberos is activated but we have no HDFS as backend (i.e. S3) then a core-site.xml is - // needed to set "hadoop.security.authentication" - let mut data = BTreeMap::new(); - data.insert( - "hadoop.security.authentication".to_string(), - Some("kerberos".to_string()), - ); - cm_builder.add_data(CORE_SITE_XML, to_hadoop_xml(data.iter())); - } - - extend_role_group_config_map(rolegroup, &merged_config.logging, &mut cm_builder).context( - InvalidLoggingConfigSnafu { - cm_name: rolegroup.object_name(), - }, - )?; - - cm_builder - .build() - .with_context(|_| BuildRoleGroupConfigSnafu { - rolegroup: rolegroup.clone(), - }) -} - -/// The rolegroup [`StatefulSet`] runs the rolegroup, as configured by the administrator. 
-/// -/// The [`Pod`](`stackable_operator::k8s_openapi::api::core::v1::Pod`)s are accessible through the -/// corresponding [`Service`](`stackable_operator::k8s_openapi::api::core::v1::Service`) (via [`build_rolegroup_headless_service`] and metrics from [`build_rolegroup_metrics_service`]). -#[allow(clippy::too_many_arguments)] -fn build_metastore_rolegroup_statefulset( - hive: &v1alpha1::HiveCluster, - hive_role: &HiveRole, - resolved_product_image: &ResolvedProductImage, - rolegroup_ref: &RoleGroupRef, - metastore_config: &HashMap>, - database_connection_details: &JdbcDatabaseConnectionDetails, - s3_connection: Option<&s3::v1alpha1::ConnectionSpec>, - merged_config: &MetaStoreConfig, - sa_name: &str, - hive_opa_config: Option<&HiveOpaConfig>, -) -> Result { - let role = hive.role(hive_role).context(InternalOperatorFailureSnafu)?; - let rolegroup = hive - .rolegroup(rolegroup_ref) - .context(InternalOperatorFailureSnafu)?; - - let mut container_builder = - ContainerBuilder::new(APP_NAME).context(FailedToCreateHiveContainerSnafu { - name: APP_NAME.to_string(), - })?; - - container_builder - .add_env_var( - "HADOOP_HEAPSIZE", - construct_hadoop_heapsize_env(merged_config).context(ConstructJvmArgumentsSnafu)?, - ) - .add_env_var( - "HADOOP_OPTS", - construct_non_heap_jvm_args(hive, role, &rolegroup_ref.role_group) - .context(ConstructJvmArgumentsSnafu)?, - ) - .add_env_var( - "CONTAINERDEBUG_LOG_DIRECTORY", - format!("{STACKABLE_LOG_DIR}/containerdebug"), - ); - database_connection_details.add_to_container(&mut container_builder); - - for (property_name_kind, config) in metastore_config { - if property_name_kind == &PropertyNameKind::Env { - // overrides - for (property_name, property_value) in config { - if property_name.is_empty() { - warn!( - property_name, - property_value, - "The env variable had an empty name, not adding it to the container" - ); - continue; - } - container_builder.add_env_var(property_name, property_value); - } - } - } - - let mut pod_builder 
= PodBuilder::new(); - - if let Some(hdfs) = &hive.spec.cluster_config.hdfs { - pod_builder - .add_volume( - VolumeBuilder::new("hdfs-discovery") - .with_config_map(&hdfs.config_map) - .build(), - ) - .context(AddVolumeSnafu)?; - container_builder - .add_volume_mount("hdfs-discovery", "/stackable/mount/hdfs-config") - .context(AddVolumeMountSnafu)?; - } - - if let Some(s3) = s3_connection { - s3.add_volumes_and_mounts(&mut pod_builder, vec![&mut container_builder]) - .context(ConfigureS3ConnectionSnafu)?; - - if s3.tls.uses_tls() && !s3.tls.uses_tls_verification() { - S3TlsNoVerificationNotSupportedSnafu.fail()?; - } - } - - // Add OPA TLS certs if configured - if let Some((tls_secret_class, tls_mount_path)) = - hive_opa_config.as_ref().and_then(|opa_config| { - opa_config - .tls_secret_class - .as_ref() - .zip(opa_config.tls_ca_cert_mount_path()) - }) - { - container_builder - .add_volume_mount(OPA_TLS_VOLUME_NAME, &tls_mount_path) - .context(AddVolumeMountSnafu)?; - - let opa_tls_volume = VolumeBuilder::new(OPA_TLS_VOLUME_NAME) - .ephemeral( - SecretOperatorVolumeSourceBuilder::new( - tls_secret_class, - // We only need the public CA certificate to verify the OPA server. - SecretClassVolumeProvisionParts::Public, - ) - .build() - .context(TlsCertSecretClassVolumeBuildSnafu)?, - ) - .build(); - - pod_builder - .add_volume(opa_tls_volume) - .context(AddVolumeSnafu)?; - } - - let db_type = hive.spec.cluster_config.metadata_database.as_hive_db_type(); - let start_command = if resolved_product_image.product_version.starts_with("3.") { - // The schematool version in 3.1.x does *not* support the `-initOrUpgradeSchema` flag yet, so we can not use that. - // As we *only* support HMS 3.1.x (or newer) since SDP release 23.11, we can safely assume we are always coming - // from an existing 3.1.x installation. There is no need to upgrade the schema, we can just check if the schema - // is already there and create it if it isn't. 
- // The script `bin/start-metastore` is buggy (e.g. around version upgrades), but it's sufficient for that job :) - // - // TODO: Once we drop support for HMS 3.1.x we can remove this condition and very likely get rid of the - // "bin/start-metastore" script. - format!( - "bin/start-metastore --config {STACKABLE_CONFIG_DIR} --db-type {db_type} --hive-bin-dir bin &" - ) - } else { - // schematool versions 4.0.x (and above) support the `-initOrUpgradeSchema`, which is exactly what we need :) - // Some docs for the schemaTool can be found here: https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=34835119 - formatdoc! {" - bin/base --config \"{STACKABLE_CONFIG_DIR}\" --service schemaTool -dbType \"{db_type}\" -initOrUpgradeSchema - bin/base --config \"{STACKABLE_CONFIG_DIR}\" --service metastore & - "} - }; - - let container_builder = container_builder - .image_from_product_image(resolved_product_image) - .command(vec![ - "/bin/bash".to_string(), - "-x".to_string(), - "-euo".to_string(), - "pipefail".to_string(), - "-c".to_string(), - ]) - .args(build_container_command_args( - hive, - formatdoc! {" - {kerberos_container_start_commands} - - {COMMON_BASH_TRAP_FUNCTIONS} - {remove_vector_shutdown_file_command} - prepare_signal_handlers - containerdebug --output={STACKABLE_LOG_DIR}/containerdebug-state.json --loop & - {start_command} - wait_for_termination $! - {create_vector_shutdown_file_command} - ", - kerberos_container_start_commands = kerberos_container_start_commands(hive), - remove_vector_shutdown_file_command = - remove_vector_shutdown_file_command(STACKABLE_LOG_DIR), - create_vector_shutdown_file_command = - create_vector_shutdown_file_command(STACKABLE_LOG_DIR), - }, - s3_connection, - hive_opa_config, - )) - .add_volume_mount(STACKABLE_CONFIG_DIR_NAME, STACKABLE_CONFIG_DIR) - .context(AddVolumeMountSnafu)? - .add_volume_mount(STACKABLE_CONFIG_MOUNT_DIR_NAME, STACKABLE_CONFIG_MOUNT_DIR) - .context(AddVolumeMountSnafu)? 
- .add_volume_mount(STACKABLE_LOG_DIR_NAME, STACKABLE_LOG_DIR) - .context(AddVolumeMountSnafu)? - .add_volume_mount( - STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME, - STACKABLE_LOG_CONFIG_MOUNT_DIR, - ) - .context(AddVolumeMountSnafu)? - .add_container_port(HIVE_PORT_NAME, HIVE_PORT.into()) - .add_container_port(METRICS_PORT_NAME, METRICS_PORT.into()) - .resources(merged_config.resources.clone().into()) - .readiness_probe(Probe { - initial_delay_seconds: Some(10), - period_seconds: Some(10), - failure_threshold: Some(5), - tcp_socket: Some(TCPSocketAction { - port: IntOrString::String(HIVE_PORT_NAME.to_string()), - ..TCPSocketAction::default() - }), - ..Probe::default() - }) - .liveness_probe(Probe { - initial_delay_seconds: Some(30), - period_seconds: Some(10), - tcp_socket: Some(TCPSocketAction { - port: IntOrString::String(HIVE_PORT_NAME.to_string()), - ..TCPSocketAction::default() - }), - ..Probe::default() - }); - - // TODO: refactor this when CRD versioning is in place - // Warn if the capacity field has been set to anything other than 0Mi - if let Some(Quantity(capacity)) = merged_config.resources.storage.data.capacity.as_ref() { - if capacity != &"0Mi".to_string() { - tracing::warn!( - "The 'storage' CRD property is set to [{capacity}]. This field is not used and will be removed in a future release." 
- ); - } - } - - let recommended_object_labels = build_recommended_labels( - hive, - &resolved_product_image.app_version_label_value, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - ); - // Used for PVC templates that cannot be modified once they are deployed - let unversioned_recommended_labels = Labels::recommended(&build_recommended_labels( - hive, - // A version value is required, and we do want to use the "recommended" format for the other desired labels - "none", - &rolegroup_ref.role, - &rolegroup_ref.role_group, - )) - .context(LabelBuildSnafu)?; - - let metadata = ObjectMetaBuilder::new() - .with_recommended_labels(&recommended_object_labels) - .context(MetadataBuildSnafu)? - .build(); - - let pvc = ListenerOperatorVolumeSourceBuilder::new( - &ListenerReference::ListenerName(hive.role_listener_name(hive_role)), - &unversioned_recommended_labels, - ) - .build_pvc(LISTENER_VOLUME_NAME.to_owned()) - .context(BuildListenerVolumeSnafu)?; - - container_builder - .add_volume_mount(LISTENER_VOLUME_NAME, LISTENER_VOLUME_DIR) - .context(AddVolumeMountSnafu)?; - - pod_builder - .metadata(metadata) - .image_pull_secrets_from_product_image(resolved_product_image) - .add_volume(Volume { - name: STACKABLE_CONFIG_DIR_NAME.to_string(), - empty_dir: Some(EmptyDirVolumeSource { - medium: None, - size_limit: Some(Quantity("10Mi".to_string())), - }), - ..Volume::default() - }) - .context(AddVolumeSnafu)? - .add_volume(stackable_operator::k8s_openapi::api::core::v1::Volume { - name: STACKABLE_CONFIG_MOUNT_DIR_NAME.to_string(), - config_map: Some(ConfigMapVolumeSource { - name: rolegroup_ref.object_name(), - ..Default::default() - }), - ..Default::default() - }) - .context(AddVolumeSnafu)? - .add_empty_dir_volume( - STACKABLE_LOG_DIR_NAME, - Some(product_logging::framework::calculate_log_volume_size_limit( - &[MAX_HIVE_LOG_FILES_SIZE], - )), - ) - .context(AddVolumeSnafu)? 
- .affinity(&merged_config.affinity) - .service_account_name(sa_name) - .security_context(PodSecurityContextBuilder::new().fs_group(1000).build()); - - if let Some(ContainerLogConfig { - choice: - Some(ContainerLogConfigChoice::Custom(CustomContainerLogConfig { - custom: ConfigMapLogConfig { config_map }, - })), - }) = merged_config.logging.containers.get(&Container::Hive) - { - pod_builder - .add_volume(Volume { - name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), - config_map: Some(ConfigMapVolumeSource { - name: config_map.into(), - ..ConfigMapVolumeSource::default() - }), - ..Volume::default() - }) - .context(AddVolumeSnafu)?; - } else { - pod_builder - .add_volume(Volume { - name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), - config_map: Some(ConfigMapVolumeSource { - name: rolegroup_ref.object_name(), - ..ConfigMapVolumeSource::default() - }), - ..Volume::default() - }) - .context(AddVolumeSnafu)?; - } - - add_graceful_shutdown_config(merged_config, &mut pod_builder).context(GracefulShutdownSnafu)?; - - if hive.has_kerberos_enabled() { - add_kerberos_pod_config(hive, hive_role, container_builder, &mut pod_builder) - .context(AddKerberosConfigSnafu)?; - } - - // this is the main container - pod_builder.add_container(container_builder.build()); - - // N.B. 
the vector container should *follow* the hive container so that the hive one is the - // default, is started first and can provide any dependencies that vector expects - if merged_config.logging.enable_vector_agent { - match &hive.spec.cluster_config.vector_aggregator_config_map_name { - Some(vector_aggregator_config_map_name) => { - pod_builder.add_container( - product_logging::framework::vector_container( - resolved_product_image, - STACKABLE_CONFIG_MOUNT_DIR_NAME, - STACKABLE_LOG_DIR_NAME, - merged_config.logging.containers.get(&Container::Vector), - ResourceRequirementsBuilder::new() - .with_cpu_request("250m") - .with_cpu_limit("500m") - .with_memory_request("128Mi") - .with_memory_limit("128Mi") - .build(), - vector_aggregator_config_map_name, - ) - .context(BuildVectorContainerSnafu)?, - ); - } - None => { - VectorAggregatorConfigMapMissingSnafu.fail()?; - } - } - } - - let mut pod_template = pod_builder.build_template(); - pod_template.merge_from(role.config.pod_overrides.clone()); - pod_template.merge_from(rolegroup.config.pod_overrides.clone()); - - Ok(StatefulSet { - metadata: ObjectMetaBuilder::new() - .name_and_namespace(hive) - .name(rolegroup_ref.object_name()) - .ownerreference_from_resource(hive, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&recommended_object_labels) - .context(MetadataBuildSnafu)? - .with_label(RESTART_CONTROLLER_ENABLED_LABEL.to_owned()) - .build(), - spec: Some(StatefulSetSpec { - pod_management_policy: Some("Parallel".to_string()), - replicas: rolegroup.replicas.map(i32::from), - selector: LabelSelector { - match_labels: Some( - Labels::role_group_selector( - hive, - APP_NAME, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - ) - .context(LabelBuildSnafu)? 
- .into(), - ), - ..LabelSelector::default() - }, - service_name: Some(rolegroup_ref.rolegroup_headless_service_name()), - template: pod_template, - volume_claim_templates: Some(vec![pvc]), - ..StatefulSetSpec::default() - }), - status: None, - }) -} - pub fn error_policy( _obj: Arc>, error: &Error, _ctx: Arc, ) -> Action { match error { - // An invalid HBaseCluster was deserialized. Await for it to change. Error::InvalidHiveCluster { .. } => Action::await_change(), - _ => Action::requeue(*Duration::from_secs(5)), - } -} - -/// Creates recommended `ObjectLabels` to be used in deployed resources -pub fn build_recommended_labels<'a, T>( - owner: &'a T, - app_version: &'a str, - role: &'a str, - role_group: &'a str, -) -> ObjectLabels<'a, T> { - ObjectLabels { - owner, - app_name: APP_NAME, - app_version, - operator_name: OPERATOR_NAME, - controller_name: HIVE_CONTROLLER_NAME, - role, - role_group, + _ => Action::requeue(*Duration::from_secs(10)), } } diff --git a/rust/operator-binary/src/controller/apply.rs b/rust/operator-binary/src/controller/apply.rs new file mode 100644 index 00000000..4d226c8c --- /dev/null +++ b/rust/operator-binary/src/controller/apply.rs @@ -0,0 +1,246 @@ +use std::{hash::Hasher, marker::PhantomData}; + +use fnv::FnvHasher; +use snafu::{ResultExt, Snafu}; +use stackable_operator::{ + builder::{configmap::ConfigMapBuilder, meta::ObjectMetaBuilder}, + cluster_resources::ClusterResources, + crd::listener::v1alpha1::Listener, + k8s_openapi::api::core::v1::ConfigMap, + kvp::{Labels, ObjectLabels}, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use super::{Applied, HIVE_CONTROLLER_NAME, KubernetesResources, Prepared, ValidatedHiveCluster}; +use crate::{OPERATOR_NAME, crd::APP_NAME, listener::build_listener_connection_string}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to apply service account"))] + ApplyServiceAccount { + source: 
stackable_operator::cluster_resources::Error, + }, + + #[snafu(display("failed to apply role binding"))] + ApplyRoleBinding { + source: stackable_operator::cluster_resources::Error, + }, + + #[snafu(display("failed to apply ConfigMap for {name}"))] + ApplyConfigMap { + source: stackable_operator::cluster_resources::Error, + name: String, + }, + + #[snafu(display("failed to apply Service for {name}"))] + ApplyService { + source: stackable_operator::cluster_resources::Error, + name: String, + }, + + #[snafu(display("failed to apply StatefulSet for {name}"))] + ApplyStatefulSet { + source: stackable_operator::cluster_resources::Error, + name: String, + }, + + #[snafu(display("failed to apply PodDisruptionBudget"))] + ApplyPdb { + source: stackable_operator::cluster_resources::Error, + }, + + #[snafu(display("failed to apply listener for {role}"))] + ApplyListener { + source: stackable_operator::cluster_resources::Error, + role: String, + }, + + #[snafu(display("failed to build discovery ConfigMap"))] + BuildDiscoveryConfigMap { + source: stackable_operator::builder::configmap::Error, + }, + + #[snafu(display("failed to apply discovery ConfigMap"))] + ApplyDiscoveryConfigMap { + source: stackable_operator::cluster_resources::Error, + }, + + #[snafu(display("failed to build discovery ConfigMap metadata"))] + DiscoveryMetadataBuild { + source: stackable_operator::builder::meta::Error, + }, + + #[snafu(display("failed to configure listener connection string"))] + ListenerConfiguration { source: crate::listener::Error }, + + #[snafu(display("failed to delete orphaned resources"))] + DeleteOrphanedResources { + source: stackable_operator::cluster_resources::Error, + }, +} + +pub struct Applier<'a> { + client: &'a stackable_operator::client::Client, + cluster_resources: ClusterResources<'a>, +} + +impl<'a> Applier<'a> { + pub fn new( + client: &'a stackable_operator::client::Client, + cluster_resources: ClusterResources<'a>, + ) -> Self { + Self { + client, + 
cluster_resources, + } + } + + pub async fn apply( + mut self, + prepared: KubernetesResources, + validated: &ValidatedHiveCluster, + ) -> Result, Error> { + // RBAC + for sa in prepared.service_accounts { + self.cluster_resources + .add(self.client, sa) + .await + .context(ApplyServiceAccountSnafu)?; + } + for rb in prepared.role_bindings { + self.cluster_resources + .add(self.client, rb) + .await + .context(ApplyRoleBindingSnafu)?; + } + + // ConfigMaps + for cm in prepared.config_maps { + let name = cm.metadata.name.clone().unwrap_or_default(); + self.cluster_resources + .add(self.client, cm) + .await + .context(ApplyConfigMapSnafu { name })?; + } + + // Services + for svc in prepared.services { + let name = svc.metadata.name.clone().unwrap_or_default(); + self.cluster_resources + .add(self.client, svc) + .await + .context(ApplyServiceSnafu { name })?; + } + + // StatefulSets — applied after ConfigMaps to prevent unnecessary Pod restarts + let mut applied_stateful_sets = Vec::new(); + for ss in prepared.stateful_sets { + let name = ss.metadata.name.clone().unwrap_or_default(); + let applied = self + .cluster_resources + .add(self.client, ss) + .await + .context(ApplyStatefulSetSnafu { name })?; + applied_stateful_sets.push(applied); + } + + // PDBs + for pdb in prepared.pod_disruption_budgets { + self.cluster_resources + .add(self.client, pdb) + .await + .context(ApplyPdbSnafu)?; + } + + // Listeners + discovery ConfigMaps + // Discovery ConfigMaps depend on applied Listener status (for connection string building). + // This is hive-specific: the Listener must be applied first, then we read its status + // to build the connection string in the discovery ConfigMap. 
+ let mut discovery_hash = FnvHasher::with_key(0); + + for listener in prepared.listeners { + let role = listener + .metadata + .labels + .as_ref() + .and_then(|l| l.get("app.kubernetes.io/component")) + .cloned() + .unwrap_or_else(|| "unknown".to_string()); + + let applied_listener: Listener = self + .cluster_resources + .add(self.client, listener) + .await + .context(ApplyListenerSnafu { role: role.clone() })?; + + for discovery_cm in build_discovery_configmaps(validated, &role, applied_listener)? { + let applied_cm = self + .cluster_resources + .add(self.client, discovery_cm) + .await + .context(ApplyDiscoveryConfigMapSnafu)?; + if let Some(generation) = applied_cm.metadata.resource_version { + discovery_hash.write(generation.as_bytes()); + } + } + } + + self.cluster_resources + .delete_orphaned_resources(self.client) + .await + .context(DeleteOrphanedResourcesSnafu)?; + + Ok(KubernetesResources { + stateful_sets: applied_stateful_sets, + config_maps: vec![], + services: vec![], + service_accounts: vec![], + role_bindings: vec![], + pod_disruption_budgets: vec![], + listeners: vec![], + discovery_hash: Some(discovery_hash.finish().to_string()), + _status: PhantomData, + }) + } +} + +fn build_discovery_configmaps( + validated: &ValidatedHiveCluster, + role: &str, + listener: Listener, +) -> Result, Error> { + let name = validated.name.to_string(); + + let recommended_object_labels = ObjectLabels { + owner: validated, + app_name: APP_NAME, + app_version: &validated.image.app_version_label_value, + operator_name: OPERATOR_NAME, + controller_name: HIVE_CONTROLLER_NAME, + role, + role_group: "discovery", + }; + + let labels = Labels::recommended(&recommended_object_labels) + .expect("Labels should be created because the cluster name is a valid label value"); + + let connection_string = build_listener_connection_string(listener, &role.to_string(), None) + .context(ListenerConfigurationSnafu)?; + + let mut cm_builder = ConfigMapBuilder::new(); + cm_builder.metadata( + 
ObjectMetaBuilder::new() + .name(&name) + .namespace(&validated.namespace) + .ownerreference_from_resource(validated, None, Some(true)) + .context(DiscoveryMetadataBuildSnafu)? + .with_labels(labels) + .build(), + ); + cm_builder.add_data("HIVE", connection_string); + + let discovery_cm = cm_builder.build().context(BuildDiscoveryConfigMapSnafu)?; + + Ok(vec![discovery_cm]) +} diff --git a/rust/operator-binary/src/controller/build.rs b/rust/operator-binary/src/controller/build.rs new file mode 100644 index 00000000..3366e54a --- /dev/null +++ b/rust/operator-binary/src/controller/build.rs @@ -0,0 +1,847 @@ +use std::marker::PhantomData; + +use stackable_operator::{ + builder::{configmap::ConfigMapBuilder, meta::ObjectMetaBuilder}, + commons::rbac::build_rbac_resources, + constants::RESTART_CONTROLLER_ENABLED_LABEL, + crd::listener::v1alpha1::{Listener, ListenerPort, ListenerSpec}, + k8s_openapi::{ + DeepMerge, + api::{ + apps::v1::{StatefulSet, StatefulSetSpec}, + core::v1::{ + Affinity, ConfigMapVolumeSource, Container as K8sContainer, ContainerPort, + EmptyDirVolumeSource, PodSpec, PodTemplateSpec, Probe, Service, ServicePort, + ServiceSpec, TCPSocketAction, Volume, VolumeMount, + }, + }, + apimachinery::pkg::{ + api::resource::Quantity, apis::meta::v1::LabelSelector, util::intstr::IntOrString, + }, + }, + kvp::{Annotations, Labels}, + product_logging, + role_utils::RoleGroupRef, +}; + +use super::{ + HIVE_CONTROLLER_NAME, KubernetesResources, Prepared, ValidatedHiveCluster, + ValidatedRoleGroupConfig, validate::MAX_HIVE_LOG_FILES_SIZE, +}; +use crate::{ + OPERATOR_NAME, + crd::{ + APP_NAME, Container, HIVE_PORT, HIVE_PORT_NAME, HIVE_SITE_XML, HiveRole, + JVM_SECURITY_PROPERTIES_FILE, METRICS_PORT, METRICS_PORT_NAME, STACKABLE_CONFIG_DIR, + STACKABLE_CONFIG_DIR_NAME, STACKABLE_CONFIG_MOUNT_DIR, STACKABLE_CONFIG_MOUNT_DIR_NAME, + STACKABLE_LOG_CONFIG_MOUNT_DIR, STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME, STACKABLE_LOG_DIR, + STACKABLE_LOG_DIR_NAME, + }, + framework, + 
framework::builder::meta::ownerreference_from_resource, + listener::{LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME}, + product_logging::extend_role_group_config_map, +}; + +pub fn build(validated: &ValidatedHiveCluster) -> KubernetesResources { + let mut stateful_sets = Vec::new(); + let mut config_maps = Vec::new(); + let mut services = Vec::new(); + let mut service_accounts = Vec::new(); + let mut role_bindings = Vec::new(); + let mut pod_disruption_budgets = Vec::new(); + let mut listeners = Vec::new(); + + // RBAC + let required_labels = { + use crate::framework::types::operator::*; + framework::kvp::label::role_selector( + validated, + &ProductName::from_str_unsafe(APP_NAME), + &RoleName::from_str_unsafe(&HiveRole::MetaStore.to_string()), + ) + }; + let (rbac_sa, rbac_rolebinding) = build_rbac_resources(validated, APP_NAME, required_labels) + .expect("RBAC resources should be created"); + service_accounts.push(rbac_sa); + role_bindings.push(rbac_rolebinding); + + for (role, rg_configs) in &validated.role_groups { + for (rolegroup_name, rg_config) in rg_configs { + let pod_data = &validated.precomputed_pod_data[role][rolegroup_name]; + let rolegroup_ref = validated.rolegroup_ref(role, rolegroup_name); + + // ConfigMap + config_maps.push(build_rolegroup_config_map( + validated, + &rolegroup_ref, + rg_config, + )); + + // Services + services.push(build_rolegroup_headless_service(validated, &rolegroup_ref)); + services.push(build_rolegroup_metrics_service(validated, &rolegroup_ref)); + + // StatefulSet + stateful_sets.push(build_rolegroup_statefulset( + validated, + &rolegroup_ref, + rg_config, + pod_data, + )); + } + + // PDB + if let Some(role_config) = validated.role_configs.get(role) { + if role_config.pdb_enabled { + let max_unavailable = role_config.pdb_max_unavailable; + let pdb_resource = { + use crate::framework::types::operator::*; + framework::builder::pdb::pod_disruption_budget_builder_with_role( + validated, + &ProductName::from_str_unsafe(APP_NAME), + 
&RoleName::from_str_unsafe(&role.to_string()), + &OperatorName::from_str_unsafe(OPERATOR_NAME), + &ControllerName::from_str_unsafe(HIVE_CONTROLLER_NAME), + ) + .with_max_unavailable(max_unavailable) + .build() + }; + pod_disruption_budgets.push(pdb_resource); + } + + // Listener + listeners.push(build_role_listener(validated, role, role_config)); + } + } + + KubernetesResources { + stateful_sets, + config_maps, + services, + service_accounts, + role_bindings, + pod_disruption_budgets, + listeners, + discovery_hash: None, + _status: PhantomData, + } +} + +fn recommended_labels(validated: &ValidatedHiveCluster, role: &str, role_group: &str) -> Labels { + use crate::framework::types::operator::*; + framework::kvp::label::recommended_labels( + validated, + &ProductName::from_str_unsafe(APP_NAME), + &ProductVersion::from_str_unsafe(&validated.image.app_version_label_value.to_string()), + &OperatorName::from_str_unsafe(OPERATOR_NAME), + &ControllerName::from_str_unsafe(HIVE_CONTROLLER_NAME), + &RoleName::from_str_unsafe(role), + &RoleGroupName::from_str_unsafe(role_group), + ) +} + +fn role_group_selector_labels( + validated: &ValidatedHiveCluster, + role: &str, + role_group: &str, +) -> Labels { + use crate::framework::types::operator::*; + framework::kvp::label::role_group_selector( + validated, + &ProductName::from_str_unsafe(APP_NAME), + &RoleName::from_str_unsafe(role), + &RoleGroupName::from_str_unsafe(role_group), + ) +} + +fn build_rolegroup_config_map( + validated: &ValidatedHiveCluster, + rolegroup: &RoleGroupRef, + rg_config: &ValidatedRoleGroupConfig, +) -> stackable_operator::k8s_openapi::api::core::v1::ConfigMap { + let labels = recommended_labels(validated, &rolegroup.role, &rolegroup.role_group); + + let metadata = ObjectMetaBuilder::new() + .name(rolegroup.object_name()) + .namespace(&validated.namespace) + .ownerreference(ownerreference_from_resource(validated, None, Some(true))) + .with_labels(labels) + .build(); + + let mut cm_builder = 
ConfigMapBuilder::new(); + cm_builder.metadata(metadata); + cm_builder.add_data(HIVE_SITE_XML, &rg_config.hive_site_xml_content); + cm_builder.add_data( + JVM_SECURITY_PROPERTIES_FILE, + &rg_config.jvm_security_properties_content, + ); + + // Kerberos core-site.xml (pre-computed during validation) + if let Some(ref core_site_xml) = rg_config.core_site_xml_content { + cm_builder.add_data(crate::crd::CORE_SITE_XML, core_site_xml); + } + + // Logging config + let logging = build_logging_for_config_map(rg_config); + extend_role_group_config_map(rolegroup, &logging, &mut cm_builder) + .expect("Logging configuration should be valid because it was validated earlier"); + + cm_builder + .build() + .expect("ConfigMap should be created because all required fields are set") +} + +fn build_rolegroup_headless_service( + validated: &ValidatedHiveCluster, + rolegroup: &RoleGroupRef, +) -> Service { + let labels = recommended_labels(validated, &rolegroup.role, &rolegroup.role_group); + let selector = role_group_selector_labels(validated, &rolegroup.role, &rolegroup.role_group); + + Service { + metadata: ObjectMetaBuilder::new() + .name(rolegroup.rolegroup_headless_service_name()) + .namespace(&validated.namespace) + .ownerreference(ownerreference_from_resource(validated, None, Some(true))) + .with_labels(labels) + .build(), + spec: Some(ServiceSpec { + type_: Some("ClusterIP".to_string()), + cluster_ip: Some("None".to_string()), + ports: Some(service_ports()), + selector: Some(selector.into()), + publish_not_ready_addresses: Some(true), + ..ServiceSpec::default() + }), + status: None, + } +} + +fn build_rolegroup_metrics_service( + validated: &ValidatedHiveCluster, + rolegroup: &RoleGroupRef, +) -> Service { + let labels = recommended_labels(validated, &rolegroup.role, &rolegroup.role_group); + let selector = role_group_selector_labels(validated, &rolegroup.role, &rolegroup.role_group); + + Service { + metadata: ObjectMetaBuilder::new() + 
.name(rolegroup.rolegroup_metrics_service_name()) + .namespace(&validated.namespace) + .ownerreference(ownerreference_from_resource(validated, None, Some(true))) + .with_labels(labels.clone()) + .with_labels(prometheus_labels()) + .with_annotations(prometheus_annotations()) + .build(), + spec: Some(ServiceSpec { + type_: Some("ClusterIP".to_string()), + cluster_ip: Some("None".to_string()), + ports: Some(metrics_ports()), + selector: Some(selector.into()), + publish_not_ready_addresses: Some(true), + ..ServiceSpec::default() + }), + status: None, + } +} + +fn build_rolegroup_statefulset( + validated: &ValidatedHiveCluster, + rolegroup_ref: &RoleGroupRef, + rg_config: &ValidatedRoleGroupConfig, + pod_data: &super::PrecomputedPodData, +) -> StatefulSet { + let labels = recommended_labels(validated, &rolegroup_ref.role, &rolegroup_ref.role_group); + let selector = + role_group_selector_labels(validated, &rolegroup_ref.role, &rolegroup_ref.role_group); + + // Build main container + let mut volume_mounts = vec![ + VolumeMount { + name: STACKABLE_CONFIG_DIR_NAME.to_string(), + mount_path: STACKABLE_CONFIG_DIR.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + name: STACKABLE_CONFIG_MOUNT_DIR_NAME.to_string(), + mount_path: STACKABLE_CONFIG_MOUNT_DIR.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + name: STACKABLE_LOG_DIR_NAME.to_string(), + mount_path: STACKABLE_LOG_DIR.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), + mount_path: STACKABLE_LOG_CONFIG_MOUNT_DIR.to_string(), + ..VolumeMount::default() + }, + VolumeMount { + name: LISTENER_VOLUME_NAME.to_string(), + mount_path: LISTENER_VOLUME_DIR.to_string(), + ..VolumeMount::default() + }, + ]; + volume_mounts.extend(pod_data.hdfs_volume_mounts.clone()); + volume_mounts.extend(pod_data.s3_volume_mounts.clone()); + volume_mounts.extend(pod_data.opa_volume_mounts.clone()); + 
volume_mounts.extend(pod_data.kerberos_volume_mounts.clone()); + + let main_container = K8sContainer { + name: APP_NAME.to_string(), + image: Some(validated.image.image.clone()), + image_pull_policy: Some(validated.image.image_pull_policy.clone()), + command: Some(vec![ + "/bin/bash".to_string(), + "-x".to_string(), + "-euo".to_string(), + "pipefail".to_string(), + "-c".to_string(), + ]), + args: Some(pod_data.commands.clone()), + env: Some(pod_data.env_vars.clone()), + ports: Some(vec![ + ContainerPort { + name: Some(HIVE_PORT_NAME.to_string()), + container_port: HIVE_PORT.into(), + ..ContainerPort::default() + }, + ContainerPort { + name: Some(METRICS_PORT_NAME.to_string()), + container_port: METRICS_PORT.into(), + ..ContainerPort::default() + }, + ]), + volume_mounts: Some(volume_mounts), + resources: Some(rg_config.resources.clone().into()), + readiness_probe: Some(Probe { + initial_delay_seconds: Some(10), + period_seconds: Some(10), + failure_threshold: Some(5), + tcp_socket: Some(TCPSocketAction { + port: IntOrString::String(HIVE_PORT_NAME.to_string()), + ..TCPSocketAction::default() + }), + ..Probe::default() + }), + liveness_probe: Some(Probe { + initial_delay_seconds: Some(30), + period_seconds: Some(10), + tcp_socket: Some(TCPSocketAction { + port: IntOrString::String(HIVE_PORT_NAME.to_string()), + ..TCPSocketAction::default() + }), + ..Probe::default() + }), + ..K8sContainer::default() + }; + + let mut containers = vec![main_container]; + if let Some(ref vector) = pod_data.vector_container { + containers.push(vector.clone()); + } + + // Build volumes + let mut volumes = vec![ + Volume { + name: STACKABLE_CONFIG_DIR_NAME.to_string(), + empty_dir: Some(EmptyDirVolumeSource { + medium: None, + size_limit: Some(Quantity("10Mi".to_string())), + }), + ..Volume::default() + }, + Volume { + name: STACKABLE_CONFIG_MOUNT_DIR_NAME.to_string(), + config_map: Some(ConfigMapVolumeSource { + name: rolegroup_ref.object_name(), + ..Default::default() + }), + 
..Default::default() + }, + ]; + + // Log dir volume + volumes.push(Volume { + name: STACKABLE_LOG_DIR_NAME.to_string(), + empty_dir: Some(EmptyDirVolumeSource { + medium: None, + size_limit: Some(product_logging::framework::calculate_log_volume_size_limit( + &[MAX_HIVE_LOG_FILES_SIZE], + )), + }), + ..Volume::default() + }); + + // Log config mount volume — custom or from rolegroup configmap + let log_config_volume = match &rg_config.logging.hive_container { + super::ValidatedContainerLogConfigChoice::Custom(cm_name) => Volume { + name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), + config_map: Some(ConfigMapVolumeSource { + name: cm_name.to_string(), + ..ConfigMapVolumeSource::default() + }), + ..Volume::default() + }, + _ => Volume { + name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), + config_map: Some(ConfigMapVolumeSource { + name: rolegroup_ref.object_name(), + ..ConfigMapVolumeSource::default() + }), + ..Volume::default() + }, + }; + volumes.push(log_config_volume); + + volumes.extend(pod_data.hdfs_volumes.clone()); + volumes.extend(pod_data.s3_volumes.clone()); + volumes.extend(pod_data.opa_volumes.clone()); + volumes.extend(pod_data.kerberos_volumes.clone()); + + let affinity = Affinity { + pod_affinity: rg_config.affinity.pod_affinity.clone(), + pod_anti_affinity: rg_config.affinity.pod_anti_affinity.clone(), + node_affinity: rg_config.affinity.node_affinity.clone(), + }; + + let pod_metadata = ObjectMetaBuilder::new().with_labels(labels.clone()).build(); + + let mut pod_template = PodTemplateSpec { + metadata: Some(pod_metadata), + spec: Some(PodSpec { + affinity: Some(affinity), + containers, + image_pull_secrets: validated.image.pull_secrets.clone(), + service_account_name: Some(pod_data.service_account_name.clone()), + security_context: Some( + stackable_operator::builder::pod::security::PodSecurityContextBuilder::new() + .fs_group(1000) + .build(), + ), + termination_grace_period_seconds: Some( + rg_config + .graceful_shutdown_timeout + 
.as_secs() + .try_into() + .unwrap_or(i64::MAX), + ), + volumes: Some(volumes), + ..PodSpec::default() + }), + }; + pod_template.merge_from(pod_data.pod_overrides.clone()); + + StatefulSet { + metadata: ObjectMetaBuilder::new() + .name(rolegroup_ref.object_name()) + .namespace(&validated.namespace) + .ownerreference(ownerreference_from_resource(validated, None, Some(true))) + .with_labels(labels) + .with_label(RESTART_CONTROLLER_ENABLED_LABEL.to_owned()) + .build(), + spec: Some(StatefulSetSpec { + pod_management_policy: Some("Parallel".to_string()), + replicas: pod_data.replicas.map(i32::from), + selector: LabelSelector { + match_labels: Some(selector.into()), + ..LabelSelector::default() + }, + service_name: Some(rolegroup_ref.rolegroup_headless_service_name()), + template: pod_template, + volume_claim_templates: Some(vec![pod_data.listener_volume_claim_template.clone()]), + ..StatefulSetSpec::default() + }), + status: None, + } +} + +fn build_role_listener( + validated: &ValidatedHiveCluster, + role: &HiveRole, + role_config: &super::ValidatedRoleConfig, +) -> Listener { + let labels = recommended_labels(validated, &role.to_string(), "none"); + + Listener { + metadata: ObjectMetaBuilder::new() + .name(&role_config.listener_name) + .namespace(&validated.namespace) + .ownerreference(ownerreference_from_resource(validated, None, Some(true))) + .with_labels(labels) + .build(), + spec: ListenerSpec { + class_name: Some(role_config.listener_class.clone()), + ports: Some(listener_ports()), + ..Default::default() + }, + status: None, + } +} + +fn service_ports() -> Vec { + vec![ServicePort { + name: Some(HIVE_PORT_NAME.to_string()), + port: HIVE_PORT.into(), + protocol: Some("TCP".to_string()), + ..ServicePort::default() + }] +} + +fn metrics_ports() -> Vec { + vec![ServicePort { + name: Some(METRICS_PORT_NAME.to_string()), + port: METRICS_PORT.into(), + protocol: Some("TCP".to_string()), + ..ServicePort::default() + }] +} + +fn listener_ports() -> Vec { + 
vec![ListenerPort { + name: HIVE_PORT_NAME.to_owned(), + port: HIVE_PORT.into(), + protocol: Some("TCP".to_owned()), + }] +} + +fn prometheus_labels() -> Labels { + Labels::try_from([("prometheus.io/scrape", "true")]).expect("should be a valid label") +} + +fn prometheus_annotations() -> Annotations { + Annotations::try_from([ + ("prometheus.io/path".to_owned(), "/metrics".to_owned()), + ("prometheus.io/port".to_owned(), METRICS_PORT.to_string()), + ("prometheus.io/scheme".to_owned(), "http".to_owned()), + ("prometheus.io/scrape".to_owned(), "true".to_owned()), + ]) + .expect("should be valid annotations") +} + +fn build_logging_for_config_map( + rg_config: &ValidatedRoleGroupConfig, +) -> stackable_operator::product_logging::spec::Logging { + let mut logging = stackable_operator::product_logging::spec::Logging { + enable_vector_agent: rg_config.logging.is_vector_agent_enabled(), + containers: std::collections::BTreeMap::new(), + }; + logging.containers.insert( + Container::Hive, + rg_config + .logging + .hive_container + .to_raw_container_log_config(), + ); + if let Some(ref vector) = rg_config.logging.vector_container { + logging.containers.insert( + Container::Vector, + vector.log_config.to_raw_container_log_config(), + ); + } + logging +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeMap, str::FromStr}; + + use stackable_operator::{ + commons::product_image_selection::ResolvedProductImage, + k8s_openapi::api::core::v1::{PersistentVolumeClaim, PodTemplateSpec}, + kvp::LabelValue, + }; + + use super::*; + use crate::{ + controller::{ + PrecomputedPodData, ValidatedHiveCluster, ValidatedLogging, ValidatedRoleConfig, + ValidatedRoleGroupConfig, + }, + crd::{HiveRole, MetastoreStorageConfig}, + framework::{ + product_logging::framework::ValidatedContainerLogConfigChoice, + types::{ + kubernetes::{NamespaceName, Uid}, + operator::ClusterName, + }, + }, + listener::DEFAULT_LISTENER_CLASS, + }; + + fn test_resolved_product_image() -> ResolvedProductImage { 
+ ResolvedProductImage { + product_version: "4.0.1".to_string(), + app_version_label_value: LabelValue::from_str("4.0.1").unwrap(), + image: "oci.stackable.tech/sdp/hive:4.0.1-stackable0.0.0-dev".to_string(), + image_pull_policy: "IfNotPresent".to_string(), + pull_secrets: None, + } + } + + fn test_validated_rg_config() -> ValidatedRoleGroupConfig { + use stackable_operator::{ + commons::resources::{CpuLimits, MemoryLimits, NoRuntimeLimits, Resources}, + k8s_openapi::apimachinery::pkg::api::resource::Quantity, + }; + + ValidatedRoleGroupConfig { + resources: Resources { + cpu: CpuLimits { + min: Some(Quantity("250m".to_owned())), + max: Some(Quantity("1000m".to_owned())), + }, + memory: MemoryLimits { + limit: Some(Quantity("768Mi".to_owned())), + runtime_limits: NoRuntimeLimits {}, + }, + storage: MetastoreStorageConfig { + data: stackable_operator::commons::resources::PvcConfig { + capacity: Some(Quantity("0Mi".to_owned())), + storage_class: None, + selectors: None, + }, + }, + }, + logging: ValidatedLogging { + hive_container: ValidatedContainerLogConfigChoice::Automatic( + stackable_operator::product_logging::spec::AutomaticContainerLogConfig::default( + ), + ), + vector_container: None, + }, + affinity: Default::default(), + graceful_shutdown_timeout: + stackable_operator::shared::time::Duration::from_minutes_unchecked(5), + hive_site_xml_content: "".to_string(), + jvm_security_properties_content: String::new(), + core_site_xml_content: None, + } + } + + fn test_validated_cluster() -> ValidatedHiveCluster { + let rg_config = test_validated_rg_config(); + let pod_data = PrecomputedPodData { + env_vars: vec![], + commands: vec!["echo test".to_string()], + kerberos_volumes: vec![], + kerberos_volume_mounts: vec![], + s3_volumes: vec![], + s3_volume_mounts: vec![], + hdfs_volumes: vec![], + hdfs_volume_mounts: vec![], + opa_volumes: vec![], + opa_volume_mounts: vec![], + vector_container: None, + service_account_name: format!("{}-test-hive-serviceaccount", 
APP_NAME), + replicas: Some(2), + pod_overrides: PodTemplateSpec::default(), + listener_volume_claim_template: PersistentVolumeClaim::default(), + }; + + let mut role_groups = BTreeMap::new(); + let mut rg_configs = BTreeMap::new(); + rg_configs.insert("default".to_string(), rg_config); + role_groups.insert(HiveRole::MetaStore, rg_configs); + + let mut precomputed = BTreeMap::new(); + let mut pod_data_map = BTreeMap::new(); + pod_data_map.insert("default".to_string(), pod_data); + precomputed.insert(HiveRole::MetaStore, pod_data_map); + + let mut role_configs = BTreeMap::new(); + role_configs.insert( + HiveRole::MetaStore, + ValidatedRoleConfig { + pdb_enabled: true, + pdb_max_unavailable: 1, + listener_class: DEFAULT_LISTENER_CLASS.to_string(), + listener_name: "test-hive-metastore".to_string(), + }, + ); + + ValidatedHiveCluster::new( + test_resolved_product_image(), + ClusterName::from_str_unsafe("test-hive"), + NamespaceName::from_str_unsafe("default"), + Uid::from_str_unsafe("c27b3971-ca72-42c1-80a4-abdfc1db0ddd"), + role_groups, + precomputed, + role_configs, + ) + } + + #[test] + fn build_produces_expected_resource_counts() { + let validated = test_validated_cluster(); + + let resources = build(&validated); + + assert_eq!( + resources.service_accounts.len(), + 1, + "one service account for the role" + ); + assert_eq!( + resources.role_bindings.len(), + 1, + "one role binding for the role" + ); + assert_eq!( + resources.config_maps.len(), + 1, + "one config map per role group" + ); + assert_eq!( + resources.services.len(), + 2, + "headless + metrics service per role group" + ); + assert_eq!( + resources.stateful_sets.len(), + 1, + "one statefulset per role group" + ); + assert_eq!( + resources.pod_disruption_budgets.len(), + 1, + "one PDB per role" + ); + assert_eq!(resources.listeners.len(), 1, "one listener per role"); + } + + #[test] + fn build_statefulset_has_correct_name_and_namespace() { + let validated = test_validated_cluster(); + + let resources = 
build(&validated); + let sts = &resources.stateful_sets[0]; + + assert_eq!( + sts.metadata.name.as_deref(), + Some("test-hive-metastore-default") + ); + assert_eq!(sts.metadata.namespace.as_deref(), Some("default")); + } + + #[test] + fn build_statefulset_has_correct_replicas() { + let validated = test_validated_cluster(); + + let resources = build(&validated); + let sts = &resources.stateful_sets[0]; + + assert_eq!(sts.spec.as_ref().unwrap().replicas, Some(2),); + } + + #[test] + fn build_headless_service_is_cluster_ip_none() { + let validated = test_validated_cluster(); + + let resources = build(&validated); + let headless = resources + .services + .iter() + .find(|s| { + s.metadata + .name + .as_deref() + .is_some_and(|n| !n.contains("metrics")) + }) + .expect("headless service should exist"); + + let spec = headless.spec.as_ref().unwrap(); + assert_eq!(spec.cluster_ip.as_deref(), Some("None")); + assert_eq!(spec.type_.as_deref(), Some("ClusterIP")); + } + + #[test] + fn build_metrics_service_has_prometheus_annotations() { + let validated = test_validated_cluster(); + + let resources = build(&validated); + let metrics = resources + .services + .iter() + .find(|s| { + s.metadata + .name + .as_deref() + .is_some_and(|n| n.contains("metrics")) + }) + .expect("metrics service should exist"); + + let annotations = metrics.metadata.annotations.as_ref().unwrap(); + assert_eq!( + annotations.get("prometheus.io/scrape"), + Some(&"true".to_string()), + ); + assert_eq!( + annotations.get("prometheus.io/port"), + Some(&METRICS_PORT.to_string()), + ); + } + + #[test] + fn build_listener_has_correct_port() { + let validated = test_validated_cluster(); + + let resources = build(&validated); + let listener = &resources.listeners[0]; + + assert_eq!( + listener.metadata.name.as_deref(), + Some("test-hive-metastore") + ); + let ports = listener.spec.ports.as_ref().unwrap(); + assert_eq!(ports.len(), 1); + assert_eq!(ports[0].name, HIVE_PORT_NAME); + assert_eq!(ports[0].port, 
i32::from(HIVE_PORT)); + } + + #[test] + fn build_statefulset_has_owner_reference() { + let validated = test_validated_cluster(); + + let resources = build(&validated); + let sts = &resources.stateful_sets[0]; + + let owner_refs = sts.metadata.owner_references.as_ref().unwrap(); + assert_eq!(owner_refs.len(), 1); + assert_eq!(owner_refs[0].name, "test-hive"); + assert_eq!(owner_refs[0].uid, "c27b3971-ca72-42c1-80a4-abdfc1db0ddd"); + assert_eq!(owner_refs[0].controller, Some(true)); + } + + #[test] + fn build_statefulset_main_container_has_expected_ports() { + let validated = test_validated_cluster(); + + let resources = build(&validated); + let sts = &resources.stateful_sets[0]; + + let containers = &sts + .spec + .as_ref() + .unwrap() + .template + .spec + .as_ref() + .unwrap() + .containers; + assert_eq!( + containers.len(), + 1, + "no vector container when vector is disabled" + ); + + let main = &containers[0]; + assert_eq!(main.name, APP_NAME); + let ports = main.ports.as_ref().unwrap(); + let port_names: Vec<&str> = ports.iter().filter_map(|p| p.name.as_deref()).collect(); + assert!(port_names.contains(&HIVE_PORT_NAME)); + assert!(port_names.contains(&METRICS_PORT_NAME)); + } +} diff --git a/rust/operator-binary/src/controller/dereference.rs b/rust/operator-binary/src/controller/dereference.rs new file mode 100644 index 00000000..caad8f24 --- /dev/null +++ b/rust/operator-binary/src/controller/dereference.rs @@ -0,0 +1,96 @@ +use snafu::{ResultExt, Snafu}; +use stackable_operator::{ + client::Client, commons::product_image_selection::ResolvedProductImage, crd::s3, + database_connections::drivers::jdbc::JdbcDatabaseConnectionDetails, kube::ResourceExt, + utils::cluster_info::KubernetesClusterInfo, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::{config::opa::HiveOpaConfig, crd::v1alpha1}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to resolve product 
image"))] + ResolveProductImage { + source: stackable_operator::commons::product_image_selection::Error, + }, + + #[snafu(display("failed to configure S3 connection"))] + ConfigureS3Connection { + source: stackable_operator::crd::s3::v1alpha1::ConnectionError, + }, + + #[snafu(display("object defines no namespace"))] + ObjectHasNoNamespace, + + #[snafu(display("invalid metadata database connection"))] + InvalidMetadataDatabaseConnection { + source: stackable_operator::database_connections::Error, + }, + + #[snafu(display("invalid OpaConfig"))] + InvalidOpaConfig { + source: stackable_operator::commons::opa::Error, + }, +} + +pub struct DereferencedObjects { + pub resolved_product_image: ResolvedProductImage, + pub s3_connection_spec: Option, + pub metadata_database_connection_details: JdbcDatabaseConnectionDetails, + pub hive_opa_config: Option, + pub cluster_info: KubernetesClusterInfo, +} + +pub async fn dereference( + client: &Client, + hive: &v1alpha1::HiveCluster, + image_base_name: &str, + image_repository: &str, + pkg_version: &str, +) -> Result { + let resolved_product_image = hive + .spec + .image + .resolve(image_base_name, image_repository, pkg_version) + .context(ResolveProductImageSnafu)?; + + let s3_connection_spec = if let Some(s3) = &hive.spec.cluster_config.s3 { + Some( + s3.clone() + .resolve( + client, + &hive.namespace().ok_or(Error::ObjectHasNoNamespace)?, + ) + .await + .context(ConfigureS3ConnectionSnafu)?, + ) + } else { + None + }; + + let metadata_database_connection_details = hive + .spec + .cluster_config + .metadata_database + .jdbc_connection_details("METADATA") + .context(InvalidMetadataDatabaseConnectionSnafu)?; + + let hive_opa_config = match hive.get_opa_config() { + Some(opa_config) => Some( + HiveOpaConfig::from_opa_config(client, hive, opa_config) + .await + .context(InvalidOpaConfigSnafu)?, + ), + None => None, + }; + + Ok(DereferencedObjects { + resolved_product_image, + s3_connection_spec, + 
metadata_database_connection_details, + hive_opa_config, + cluster_info: client.kubernetes_cluster_info.clone(), + }) +} diff --git a/rust/operator-binary/src/controller/update_status.rs b/rust/operator-binary/src/controller/update_status.rs new file mode 100644 index 00000000..d2ecf160 --- /dev/null +++ b/rust/operator-binary/src/controller/update_status.rs @@ -0,0 +1,47 @@ +use snafu::{ResultExt, Snafu}; +use stackable_operator::status::condition::{ + compute_conditions, operations::ClusterOperationsConditionBuilder, + statefulset::StatefulSetConditionBuilder, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use super::{Applied, KubernetesResources}; +use crate::{ + OPERATOR_NAME, + crd::{HiveClusterStatus, v1alpha1}, +}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to apply status"))] + ApplyStatus { + source: stackable_operator::client::Error, + }, +} + +pub async fn update_status( + client: &stackable_operator::client::Client, + hive: &v1alpha1::HiveCluster, + applied: KubernetesResources, +) -> Result<(), Error> { + let mut ss_cond_builder = StatefulSetConditionBuilder::default(); + for ss in &applied.stateful_sets { + ss_cond_builder.add(ss.clone()); + } + + let cluster_operation_cond_builder = + ClusterOperationsConditionBuilder::new(&hive.spec.cluster_operation); + + let status = HiveClusterStatus { + discovery_hash: Some(applied.discovery_hash.as_deref().unwrap_or("").to_string()), + conditions: compute_conditions(hive, &[&ss_cond_builder, &cluster_operation_cond_builder]), + }; + + client + .apply_patch_status(OPERATOR_NAME, hive, &status) + .await + .context(ApplyStatusSnafu)?; + + Ok(()) +} diff --git a/rust/operator-binary/src/controller/validate.rs b/rust/operator-binary/src/controller/validate.rs new file mode 100644 index 00000000..923b5d6a --- /dev/null +++ b/rust/operator-binary/src/controller/validate.rs @@ -0,0 +1,976 @@ +use std::{ + borrow::Cow, 
+ collections::{BTreeMap, HashMap}, + str::FromStr, +}; + +use product_config::{ + ProductConfigManager, + types::PropertyNameKind, + writer::{to_hadoop_xml, to_java_properties_string}, +}; +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::{ + builder::pod::{ + PodBuilder, + container::ContainerBuilder, + resources::ResourceRequirementsBuilder, + volume::{ + ListenerOperatorVolumeSourceBuilder, ListenerReference, + SecretOperatorVolumeSourceBuilder, VolumeBuilder, + }, + }, + commons::{ + product_image_selection::ResolvedProductImage, + secret_class::SecretClassVolumeProvisionParts, + }, + crd::s3, + k8s_openapi::{ + DeepMerge, + api::core::v1::{EnvVar, PodTemplateSpec, VolumeMount}, + }, + kube::{Resource, ResourceExt}, + kvp::{Labels, ObjectLabels}, + product_config_utils::{transform_all_roles_to_config, validate_all_roles_and_groups_config}, + product_logging::{self, framework::LoggingError}, + role_utils::RoleGroupRef, +}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use super::{ + HIVE_CONTROLLER_NAME, PrecomputedPodData, ValidatedHiveCluster, ValidatedLogging, + ValidatedRoleConfig, ValidatedRoleGroupConfig, dereference::DereferencedObjects, +}; +use crate::{ + OPERATOR_NAME, + command::build_container_command_args, + config::{ + jvm::{construct_hadoop_heapsize_env, construct_non_heap_jvm_args}, + opa::{HiveOpaConfig, OPA_TLS_VOLUME_NAME}, + }, + crd::{ + APP_NAME, Container, HIVE_SITE_XML, HiveRole, JVM_SECURITY_PROPERTIES_FILE, + MetaStoreConfig, STACKABLE_CONFIG_DIR, STACKABLE_CONFIG_MOUNT_DIR_NAME, STACKABLE_LOG_DIR, + STACKABLE_LOG_DIR_NAME, + databases::{MetadataDatabaseConnection, derby_driver_class}, + v1alpha1, + }, + framework::{ + product_logging::framework::{ + VectorContainerLogConfig, validate_logging_configuration_for_container, + }, + types::{ + kubernetes::{ConfigMapName, NamespaceName, Uid}, + operator::ClusterName, + }, + }, + kerberos::{kerberos_config_properties, kerberos_container_start_commands}, + 
listener::LISTENER_VOLUME_NAME, +}; + +pub const MAX_HIVE_LOG_FILES_SIZE: stackable_operator::memory::MemoryQuantity = + stackable_operator::memory::MemoryQuantity { + value: 10.0, + unit: stackable_operator::memory::BinaryMultiple::Mebi, + }; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to validate cluster name"))] + InvalidClusterName { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to validate namespace"))] + InvalidNamespace { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("failed to validate UID"))] + InvalidUid { + source: crate::framework::macros::attributed_string_type::Error, + }, + + #[snafu(display("object defines no name"))] + MissingName, + + #[snafu(display("object defines no namespace"))] + MissingNamespace, + + #[snafu(display("object defines no UID"))] + MissingUid, + + #[snafu(display("object defines no metastore role"))] + NoMetaStoreRole, + + #[snafu(display("failed to generate product config"))] + GenerateProductConfig { + source: stackable_operator::product_config_utils::Error, + }, + + #[snafu(display("invalid product config"))] + InvalidProductConfig { + source: stackable_operator::product_config_utils::Error, + }, + + #[snafu(display("failed to resolve and merge resource config for role and role group"))] + FailedToResolveResourceConfig { source: crate::crd::Error }, + + #[snafu(display("failed to validate logging configuration"))] + InvalidLoggingConfig { + source: crate::framework::product_logging::framework::Error, + }, + + #[snafu(display("vector agent is enabled but vector aggregator ConfigMap is missing"))] + VectorAggregatorConfigMapMissing, + + #[snafu(display("failed to parse vector aggregator ConfigMap name"))] + InvalidVectorAggregatorConfigMapName { + source: crate::framework::macros::attributed_string_type::Error, + }, + + 
#[snafu(display("missing graceful shutdown timeout"))] + MissingGracefulShutdownTimeout, + + #[snafu(display("failed to construct JVM arguments"))] + ConstructJvmArguments { source: crate::config::jvm::Error }, + + #[snafu(display( + "Hive does not support skipping the verification of the tls enabled S3 server" + ))] + S3TlsNoVerificationNotSupported, + + #[snafu(display("failed to create hive container [{name}]"))] + FailedToCreateHiveContainer { + source: stackable_operator::builder::pod::container::Error, + name: String, + }, + + #[snafu(display("failed to build Labels"))] + LabelBuild { + source: stackable_operator::kvp::LabelError, + }, + + #[snafu(display("failed to build vector container"))] + BuildVectorContainer { source: LoggingError }, + + #[snafu(display("failed to add needed volume"))] + AddVolume { + source: stackable_operator::builder::pod::Error, + }, + + #[snafu(display("failed to add needed volumeMount"))] + AddVolumeMount { + source: stackable_operator::builder::pod::container::Error, + }, + + #[snafu(display("failed to build RBAC resources"))] + BuildRbacResources { + source: stackable_operator::commons::rbac::Error, + }, + + #[snafu(display("failed to get required Labels"))] + GetRequiredLabels { + source: + stackable_operator::kvp::KeyValuePairError, + }, + + #[snafu(display("failed to build listener volume"))] + BuildListenerVolume { + source: stackable_operator::builder::pod::volume::ListenerOperatorVolumeSourceBuilderError, + }, + + #[snafu(display("internal operator failure"))] + InternalOperatorFailure { source: crate::crd::Error }, + + #[snafu(display("failed to serialize [{JVM_SECURITY_PROPERTIES_FILE}] for {rolegroup}"))] + JvmSecurityProperties { + source: product_config::writer::PropertiesWriterError, + rolegroup: String, + }, + + #[snafu(display("failed to configure S3 connection"))] + ConfigureS3Connection { + source: stackable_operator::crd::s3::v1alpha1::ConnectionError, + }, + + #[snafu(display("failed to build TLS certificate 
SecretClass Volume"))] + TlsCertSecretClassVolumeBuild { + source: stackable_operator::builder::pod::volume::SecretOperatorVolumeSourceBuilderError, + }, +} + +pub fn validate_cluster( + hive: &v1alpha1::HiveCluster, + dereferenced: &DereferencedObjects, + product_config_manager: &ProductConfigManager, +) -> Result { + let cluster_name = ClusterName::from_str(&hive.meta().name.clone().context(MissingNameSnafu)?) + .context(InvalidClusterNameSnafu)?; + + let namespace = NamespaceName::from_str( + &hive + .meta() + .namespace + .clone() + .context(MissingNamespaceSnafu)?, + ) + .context(InvalidNamespaceSnafu)?; + + let uid = Uid::from_str(&hive.meta().uid.clone().context(MissingUidSnafu)?) + .context(InvalidUidSnafu)?; + + let role = hive.spec.metastore.as_ref().context(NoMetaStoreRoleSnafu)?; + + let validated_config = validate_all_roles_and_groups_config( + &dereferenced.resolved_product_image.product_version, + &transform_all_roles_to_config( + hive, + &[( + HiveRole::MetaStore.to_string(), + ( + vec![ + PropertyNameKind::Env, + PropertyNameKind::Cli, + PropertyNameKind::File(HIVE_SITE_XML.to_string()), + PropertyNameKind::File(JVM_SECURITY_PROPERTIES_FILE.to_string()), + ], + role.clone(), + ), + )] + .into(), + ) + .context(GenerateProductConfigSnafu)?, + product_config_manager, + false, + false, + ) + .context(InvalidProductConfigSnafu)?; + + let metastore_config = validated_config + .get(&HiveRole::MetaStore.to_string()) + .map(Cow::Borrowed) + .unwrap_or_default(); + + let mut role_groups = BTreeMap::new(); + let mut precomputed_pod_data = BTreeMap::new(); + let mut metastore_rg_configs = BTreeMap::new(); + let mut metastore_pod_data = BTreeMap::new(); + + let hive_namespace = namespace.to_string(); + + for (rolegroup_name, rolegroup_config) in metastore_config.iter() { + let rolegroup = hive.metastore_rolegroup_ref(rolegroup_name); + let merged_config = hive + .merged_config(&HiveRole::MetaStore, &rolegroup) + .context(FailedToResolveResourceConfigSnafu)?; + 
+ let validated_rg_config = validate_role_group_config( + hive, + &hive_namespace, + &dereferenced.resolved_product_image, + &rolegroup, + rolegroup_config, + &dereferenced.metadata_database_connection_details, + dereferenced.s3_connection_spec.as_ref(), + &merged_config, + &dereferenced.hive_opa_config, + &dereferenced.cluster_info, + )?; + + let pod_data = compute_precomputed_pod_data( + hive, + &HiveRole::MetaStore, + &dereferenced.resolved_product_image, + &rolegroup, + rolegroup_config, + &dereferenced.metadata_database_connection_details, + dereferenced.s3_connection_spec.as_ref(), + &merged_config, + &dereferenced.hive_opa_config, + )?; + + metastore_rg_configs.insert(rolegroup_name.clone(), validated_rg_config); + metastore_pod_data.insert(rolegroup_name.clone(), pod_data); + } + + role_groups.insert(HiveRole::MetaStore, metastore_rg_configs); + precomputed_pod_data.insert(HiveRole::MetaStore, metastore_pod_data); + + let mut role_configs = BTreeMap::new(); + for role in [HiveRole::MetaStore] { + if let Some(role_config) = hive.role_config(&role) { + let pdb = &role_config.common.pod_disruption_budget; + role_configs.insert( + role.clone(), + ValidatedRoleConfig { + pdb_enabled: pdb.enabled, + pdb_max_unavailable: pdb.max_unavailable.unwrap_or(1), + listener_class: role_config.listener_class.clone(), + listener_name: hive.role_listener_name(&role), + }, + ); + } + } + + Ok(ValidatedHiveCluster::new( + dereferenced.resolved_product_image.clone(), + cluster_name, + namespace, + uid, + role_groups, + precomputed_pod_data, + role_configs, + )) +} + +#[allow(clippy::too_many_arguments)] +fn validate_role_group_config( + hive: &v1alpha1::HiveCluster, + hive_namespace: &str, + resolved_product_image: &ResolvedProductImage, + rolegroup: &RoleGroupRef, + role_group_config: &HashMap>, + database_connection_details: &stackable_operator::database_connections::drivers::jdbc::JdbcDatabaseConnectionDetails, + s3_connection_spec: Option<&s3::v1alpha1::ConnectionSpec>, + 
merged_config: &MetaStoreConfig,
+    hive_opa_config: &Option<HiveOpaConfig>,
+    cluster_info: &stackable_operator::utils::cluster_info::KubernetesClusterInfo,
+) -> Result<ValidatedRoleGroupConfig, Error> {
+    let hive_container_log_config =
+        validate_logging_configuration_for_container(&merged_config.logging, Container::Hive)
+            .context(InvalidLoggingConfigSnafu)?;
+
+    let vector_container = if merged_config.logging.enable_vector_agent {
+        ConfigMapName::from_str(
+            hive.spec
+                .cluster_config
+                .vector_aggregator_config_map_name
+                .as_deref()
+                .context(VectorAggregatorConfigMapMissingSnafu)?,
+        )
+        .context(InvalidVectorAggregatorConfigMapNameSnafu)?;
+
+        let vector_log_config =
+            validate_logging_configuration_for_container(&merged_config.logging, Container::Vector)
+                .context(InvalidLoggingConfigSnafu)?;
+
+        Some(VectorContainerLogConfig {
+            log_config: vector_log_config,
+        })
+    } else {
+        None
+    };
+
+    let validated_logging = ValidatedLogging {
+        hive_container: hive_container_log_config,
+        vector_container,
+    };
+
+    let graceful_shutdown_timeout = merged_config
+        .graceful_shutdown_timeout
+        .context(MissingGracefulShutdownTimeoutSnafu)?;
+
+    // Pre-compute hive-site.xml content
+    let hive_site_xml_content = generate_hive_site_xml(
+        hive,
+        hive_namespace,
+        resolved_product_image,
+        role_group_config,
+        database_connection_details,
+        s3_connection_spec,
+        &hive.spec.cluster_config.metadata_database,
+        hive_opa_config.as_ref(),
+        cluster_info,
+    )?;
+
+    // Pre-compute JVM security properties
+    let jvm_security_properties_content =
+        generate_jvm_security_properties(role_group_config, &rolegroup.role_group)?;
+
+    // Pre-compute core-site.xml content (only needed when Kerberos is enabled without HDFS)
+    let core_site_xml_content =
+        if hive.has_kerberos_enabled() && hive.spec.cluster_config.hdfs.is_none() {
+            let mut data = std::collections::BTreeMap::new();
+            data.insert(
+                "hadoop.security.authentication".to_string(),
+                Some("kerberos".to_string()),
+            );
+
Some(product_config::writer::to_hadoop_xml(data.iter())) + } else { + None + }; + + Ok(ValidatedRoleGroupConfig { + resources: merged_config.resources.clone(), + logging: validated_logging, + affinity: merged_config.affinity.clone(), + graceful_shutdown_timeout, + hive_site_xml_content, + jvm_security_properties_content, + core_site_xml_content, + }) +} + +#[allow(clippy::too_many_arguments)] +fn generate_hive_site_xml( + hive: &v1alpha1::HiveCluster, + hive_namespace: &str, + resolved_product_image: &ResolvedProductImage, + role_group_config: &HashMap>, + database_connection_details: &stackable_operator::database_connections::drivers::jdbc::JdbcDatabaseConnectionDetails, + s3_connection_spec: Option<&s3::v1alpha1::ConnectionSpec>, + metadata_database: &MetadataDatabaseConnection, + hive_opa_config: Option<&HiveOpaConfig>, + cluster_info: &stackable_operator::utils::cluster_info::KubernetesClusterInfo, +) -> Result { + let mut hive_site_data = String::new(); + + for (property_name_kind, config) in role_group_config { + match property_name_kind { + PropertyNameKind::File(file_name) if file_name == HIVE_SITE_XML => { + let mut data = BTreeMap::new(); + + data.insert( + MetaStoreConfig::METASTORE_WAREHOUSE_DIR.to_string(), + Some("/stackable/warehouse".to_string()), + ); + + let driver = match metadata_database { + MetadataDatabaseConnection::Derby(_) => { + derby_driver_class(&resolved_product_image.product_version) + } + _ => database_connection_details.driver.as_str(), + }; + data.insert( + MetaStoreConfig::CONNECTION_DRIVER_NAME.to_string(), + Some(driver.to_owned()), + ); + data.insert( + MetaStoreConfig::CONNECTION_URL.to_string(), + Some(database_connection_details.connection_url.to_string()), + ); + if let Some(EnvVar { + name: username_env_name, + .. 
+ }) = &database_connection_details.username_env + { + data.insert( + MetaStoreConfig::CONNECTION_USER_NAME.to_string(), + Some(format!("${{env:{username_env_name}}}")), + ); + } + if let Some(EnvVar { + name: password_env_name, + .. + }) = &database_connection_details.password_env + { + data.insert( + MetaStoreConfig::CONNECTION_PASSWORD.to_string(), + Some(format!("${{env:{password_env_name}}}")), + ); + } + + if let Some(s3) = s3_connection_spec { + data.insert( + MetaStoreConfig::S3_ENDPOINT.to_string(), + Some( + s3.endpoint() + .context(ConfigureS3ConnectionSnafu)? + .to_string(), + ), + ); + data.insert( + MetaStoreConfig::S3_REGION_NAME.to_string(), + Some(s3.region.name.clone()), + ); + if let Some((access_key_file, secret_key_file)) = s3.credentials_mount_paths() { + data.insert( + MetaStoreConfig::S3_ACCESS_KEY.to_string(), + Some(format!("${{file:UTF-8:{access_key_file}}}")), + ); + data.insert( + MetaStoreConfig::S3_SECRET_KEY.to_string(), + Some(format!("${{file:UTF-8:{secret_key_file}}}")), + ); + } + data.insert( + MetaStoreConfig::S3_SSL_ENABLED.to_string(), + Some(s3.tls.uses_tls().to_string()), + ); + data.insert( + MetaStoreConfig::S3_PATH_STYLE_ACCESS.to_string(), + Some((s3.access_style == s3::v1alpha1::S3AccessStyle::Path).to_string()), + ); + } + + for (property_name, property_value) in + kerberos_config_properties(hive, hive_namespace, cluster_info) + { + data.insert(property_name.to_string(), Some(property_value.to_string())); + } + + // OPA settings + if let Some(opa_config) = hive_opa_config { + data.extend( + opa_config + .as_config(&resolved_product_image.product_version) + .into_iter() + .map(|(k, v)| (k, Some(v))) + .collect::>>(), + ); + } + + // overrides + for (property_name, property_value) in config { + data.insert(property_name.to_string(), Some(property_value.to_string())); + } + + hive_site_data = to_hadoop_xml(data.iter()); + } + _ => {} + } + } + + Ok(hive_site_data) +} + +fn generate_jvm_security_properties( + 
role_group_config: &HashMap<PropertyNameKind, BTreeMap<String, String>>,
+    rolegroup_name: &str,
+) -> Result<String, Error> {
+    let jvm_sec_props: BTreeMap<String, Option<String>> = role_group_config
+        .get(&PropertyNameKind::File(
+            JVM_SECURITY_PROPERTIES_FILE.to_string(),
+        ))
+        .cloned()
+        .unwrap_or_default()
+        .into_iter()
+        .map(|(k, v)| (k, Some(v)))
+        .collect();
+
+    to_java_properties_string(jvm_sec_props.iter()).context(JvmSecurityPropertiesSnafu {
+        rolegroup: rolegroup_name.to_owned(),
+    })
+}
+
+#[allow(clippy::too_many_arguments)]
+fn compute_precomputed_pod_data(
+    hive: &v1alpha1::HiveCluster,
+    hive_role: &HiveRole,
+    resolved_product_image: &ResolvedProductImage,
+    rolegroup_ref: &RoleGroupRef<v1alpha1::HiveCluster>,
+    metastore_config: &HashMap<PropertyNameKind, BTreeMap<String, String>>,
+    database_connection_details: &stackable_operator::database_connections::drivers::jdbc::JdbcDatabaseConnectionDetails,
+    s3_connection: Option<&s3::v1alpha1::ConnectionSpec>,
+    merged_config: &MetaStoreConfig,
+    hive_opa_config: &Option<HiveOpaConfig>,
+) -> Result<PrecomputedPodData, Error> {
+    let role = hive.role(hive_role).context(InternalOperatorFailureSnafu)?;
+    let rolegroup = hive
+        .rolegroup(rolegroup_ref)
+        .context(InternalOperatorFailureSnafu)?;
+
+    // Build env vars
+    let mut env_vars = vec![
+        EnvVar {
+            name: "HADOOP_HEAPSIZE".to_string(),
+            value: Some(
+                construct_hadoop_heapsize_env(merged_config).context(ConstructJvmArgumentsSnafu)?,
+            ),
+            ..EnvVar::default()
+        },
+        EnvVar {
+            name: "HADOOP_OPTS".to_string(),
+            value: Some(
+                construct_non_heap_jvm_args(hive, role, &rolegroup_ref.role_group)
+                    .context(ConstructJvmArgumentsSnafu)?,
+            ),
+            ..EnvVar::default()
+        },
+        EnvVar {
+            name: "CONTAINERDEBUG_LOG_DIRECTORY".to_string(),
+            value: Some(format!("{STACKABLE_LOG_DIR}/containerdebug")),
+            ..EnvVar::default()
+        },
+    ];
+
+    // Add database env vars
+    if let Some(ref env) = database_connection_details.username_env {
+        env_vars.push(env.clone());
+    }
+    if let Some(ref env) = database_connection_details.password_env {
+        env_vars.push(env.clone());
+    }
+
+    // Add env overrides from product config
+    for (property_name_kind, config)
in metastore_config { + if property_name_kind == &PropertyNameKind::Env { + for (property_name, property_value) in config { + if !property_name.is_empty() { + env_vars.push(EnvVar { + name: property_name.clone(), + value: Some(property_value.clone()), + ..EnvVar::default() + }); + } + } + } + } + + // HDFS volumes/mounts + let mut hdfs_volumes = Vec::new(); + let mut hdfs_volume_mounts = Vec::new(); + if let Some(hdfs) = &hive.spec.cluster_config.hdfs { + hdfs_volumes.push( + VolumeBuilder::new("hdfs-discovery") + .with_config_map(&hdfs.config_map) + .build(), + ); + hdfs_volume_mounts.push(VolumeMount { + name: "hdfs-discovery".to_string(), + mount_path: "/stackable/mount/hdfs-config".to_string(), + ..VolumeMount::default() + }); + } + + // S3 volumes/mounts + let mut s3_volumes = Vec::new(); + let mut s3_volume_mounts = Vec::new(); + if let Some(s3) = s3_connection { + // We need to use the builder to get the right volumes — unfortunately S3 add_volumes_and_mounts + // takes PodBuilder/ContainerBuilder. We'll compute those through a temporary builder. 
+ let mut temp_pod_builder = PodBuilder::new(); + let mut temp_container_builder = + ContainerBuilder::new(APP_NAME).context(FailedToCreateHiveContainerSnafu { + name: APP_NAME.to_string(), + })?; + s3.add_volumes_and_mounts(&mut temp_pod_builder, vec![&mut temp_container_builder]) + .context(ConfigureS3ConnectionSnafu)?; + + if s3.tls.uses_tls() && !s3.tls.uses_tls_verification() { + S3TlsNoVerificationNotSupportedSnafu.fail()?; + } + + let temp_pod = temp_pod_builder.build_template(); + if let Some(spec) = &temp_pod.spec { + if let Some(vols) = &spec.volumes { + s3_volumes.extend(vols.clone()); + } + } + let temp_container = temp_container_builder.build(); + if let Some(mounts) = &temp_container.volume_mounts { + s3_volume_mounts.extend(mounts.clone()); + } + } + + // OPA volumes/mounts + let mut opa_volumes = Vec::new(); + let mut opa_volume_mounts = Vec::new(); + if let Some((tls_secret_class, tls_mount_path)) = + hive_opa_config.as_ref().and_then(|opa_config| { + opa_config + .tls_secret_class + .as_ref() + .zip(opa_config.tls_ca_cert_mount_path()) + }) + { + opa_volume_mounts.push(VolumeMount { + name: OPA_TLS_VOLUME_NAME.to_string(), + mount_path: tls_mount_path, + ..VolumeMount::default() + }); + + let opa_tls_volume = VolumeBuilder::new(OPA_TLS_VOLUME_NAME) + .ephemeral( + SecretOperatorVolumeSourceBuilder::new( + tls_secret_class, + SecretClassVolumeProvisionParts::Public, + ) + .build() + .context(TlsCertSecretClassVolumeBuildSnafu)?, + ) + .build(); + opa_volumes.push(opa_tls_volume); + } + + // Kerberos volumes/mounts + let mut kerberos_volumes = Vec::new(); + let mut kerberos_volume_mounts = Vec::new(); + if hive.has_kerberos_enabled() { + if let Some(kerberos_secret_class) = hive.kerberos_secret_class() { + let kerberos_secret_operator_volume = SecretOperatorVolumeSourceBuilder::new( + &kerberos_secret_class, + SecretClassVolumeProvisionParts::PublicPrivate, + ) + .with_service_scope(hive.name_any()) + 
.with_kerberos_service_name(hive_role.kerberos_service_name()) + .build() + .context(TlsCertSecretClassVolumeBuildSnafu)?; + kerberos_volumes.push( + VolumeBuilder::new("kerberos") + .ephemeral(kerberos_secret_operator_volume) + .build(), + ); + kerberos_volume_mounts.push(VolumeMount { + name: "kerberos".to_string(), + mount_path: "/stackable/kerberos".to_string(), + ..VolumeMount::default() + }); + env_vars.push(EnvVar { + name: "KRB5_CONFIG".to_string(), + value: Some("/stackable/kerberos/krb5.conf".to_string()), + ..EnvVar::default() + }); + } + } + + // Container commands + let db_type = hive.spec.cluster_config.metadata_database.as_hive_db_type(); + let start_command = if resolved_product_image.product_version.starts_with("3.") { + format!( + "bin/start-metastore --config {STACKABLE_CONFIG_DIR} --db-type {db_type} --hive-bin-dir bin &" + ) + } else { + indoc::formatdoc! {" + bin/base --config \"{STACKABLE_CONFIG_DIR}\" --service schemaTool -dbType \"{db_type}\" -initOrUpgradeSchema + bin/base --config \"{STACKABLE_CONFIG_DIR}\" --service metastore & + "} + }; + + let commands = build_container_command_args( + hive, + indoc::formatdoc! {" + {kerberos_container_start_commands} + + {COMMON_BASH_TRAP_FUNCTIONS} + {remove_vector_shutdown_file_command} + prepare_signal_handlers + containerdebug --output={STACKABLE_LOG_DIR}/containerdebug-state.json --loop & + {start_command} + wait_for_termination $! 
+ {create_vector_shutdown_file_command} + ", + kerberos_container_start_commands = kerberos_container_start_commands(hive), + COMMON_BASH_TRAP_FUNCTIONS = stackable_operator::utils::COMMON_BASH_TRAP_FUNCTIONS, + remove_vector_shutdown_file_command = + product_logging::framework::remove_vector_shutdown_file_command(STACKABLE_LOG_DIR), + create_vector_shutdown_file_command = + product_logging::framework::create_vector_shutdown_file_command(STACKABLE_LOG_DIR), + }, + s3_connection, + hive_opa_config.as_ref(), + ); + + // RBAC service account name + let sa_name = format!("{}-{}-serviceaccount", APP_NAME, hive.name_any()); + + // Vector container + let vector_container = if merged_config.logging.enable_vector_agent { + let vector_aggregator_config_map_name = hive + .spec + .cluster_config + .vector_aggregator_config_map_name + .as_deref() + .context(VectorAggregatorConfigMapMissingSnafu)?; + + Some( + product_logging::framework::vector_container( + resolved_product_image, + STACKABLE_CONFIG_MOUNT_DIR_NAME, + STACKABLE_LOG_DIR_NAME, + merged_config.logging.containers.get(&Container::Vector), + ResourceRequirementsBuilder::new() + .with_cpu_request("250m") + .with_cpu_limit("500m") + .with_memory_request("128Mi") + .with_memory_limit("128Mi") + .build(), + vector_aggregator_config_map_name, + ) + .context(BuildVectorContainerSnafu)?, + ) + } else { + None + }; + + // Pod overrides + let mut pod_overrides = PodTemplateSpec::default(); + pod_overrides.merge_from(role.config.pod_overrides.clone()); + pod_overrides.merge_from(rolegroup.config.pod_overrides.clone()); + + // Listener PVC + let recommended_object_labels = ObjectLabels { + owner: hive, + app_name: APP_NAME, + app_version: "none", + operator_name: OPERATOR_NAME, + controller_name: HIVE_CONTROLLER_NAME, + role: &rolegroup_ref.role, + role_group: &rolegroup_ref.role_group, + }; + let unversioned_recommended_labels = + Labels::recommended(&recommended_object_labels).context(LabelBuildSnafu)?; + + let 
listener_volume_claim_template = ListenerOperatorVolumeSourceBuilder::new( + &ListenerReference::ListenerName(hive.role_listener_name(hive_role)), + &unversioned_recommended_labels, + ) + .build_pvc(LISTENER_VOLUME_NAME.to_owned()) + .context(BuildListenerVolumeSnafu)?; + + Ok(PrecomputedPodData { + env_vars, + commands, + kerberos_volumes, + kerberos_volume_mounts, + s3_volumes, + s3_volume_mounts, + hdfs_volumes, + hdfs_volume_mounts, + opa_volumes, + opa_volume_mounts, + vector_container, + service_account_name: sa_name, + replicas: rolegroup.replicas, + pod_overrides, + listener_volume_claim_template, + }) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use stackable_operator::{ + commons::{networking::DomainName, product_image_selection::ResolvedProductImage}, + kvp::LabelValue, + utils::cluster_info::KubernetesClusterInfo, + }; + + use super::{ErrorDiscriminants, validate_cluster}; + use crate::{controller::dereference::DereferencedObjects, crd::v1alpha1}; + + // CARGO_MANIFEST_DIR points to rust/operator-binary; properties.yaml is two levels up. + const PROPERTIES_YAML: &str = concat!( + env!("CARGO_MANIFEST_DIR"), + "/../../deploy/config-spec/properties.yaml" + ); + + fn test_hive_cluster() -> v1alpha1::HiveCluster { + let yaml = indoc::indoc! 
{r#" + apiVersion: hive.stackable.tech/v1alpha1 + kind: HiveCluster + metadata: + name: test-hive + namespace: default + uid: c27b3971-ca72-42c1-80a4-abdfc1db0ddd + spec: + image: + productVersion: "4.0.1" + clusterConfig: + metadataDatabase: + derby: + databaseName: metastore_db + metastore: + roleGroups: + default: + replicas: 2 + "#}; + stackable_operator::utils::yaml_from_str_singleton_map(yaml) + .expect("HiveCluster YAML should parse") + } + + fn test_dereferenced_objects(hive: &v1alpha1::HiveCluster) -> DereferencedObjects { + let resolved_product_image = ResolvedProductImage { + product_version: "4.0.1".to_string(), + app_version_label_value: LabelValue::from_str("4.0.1").unwrap(), + image: "oci.stackable.tech/sdp/hive:4.0.1-stackable0.0.0-dev".to_string(), + image_pull_policy: "IfNotPresent".to_string(), + pull_secrets: None, + }; + + let metadata_database_connection_details = hive + .spec + .cluster_config + .metadata_database + .jdbc_connection_details("METADATA") + .expect("derby JDBC connection details should be valid"); + + DereferencedObjects { + resolved_product_image, + s3_connection_spec: None, + metadata_database_connection_details, + hive_opa_config: None, + cluster_info: KubernetesClusterInfo { + cluster_domain: DomainName::from_str("cluster.local") + .expect("cluster.local should be a valid DomainName"), + }, + } + } + + fn test_product_config() -> product_config::ProductConfigManager { + product_config::ProductConfigManager::from_yaml_file(PROPERTIES_YAML) + .expect("properties.yaml should be valid") + } + + fn assert_validate_err( + mutate: impl FnOnce(&mut v1alpha1::HiveCluster), + expected: ErrorDiscriminants, + ) { + let mut hive = test_hive_cluster(); + mutate(&mut hive); + let deref = test_dereferenced_objects(&hive); + let product_config = test_product_config(); + let result = validate_cluster(&hive, &deref, &product_config); + match result { + Err(err) => assert_eq!(expected, ErrorDiscriminants::from(err)), + Ok(_) => 
panic!("validate_cluster should have failed with {expected:?}"), + } + } + + #[test] + fn test_validate_ok() { + let hive = test_hive_cluster(); + let deref = test_dereferenced_objects(&hive); + let product_config = test_product_config(); + let result = validate_cluster(&hive, &deref, &product_config); + assert!(result.is_ok(), "validate_cluster should succeed"); + let validated = result.unwrap(); + assert_eq!(validated.name.to_string(), "test-hive"); + assert_eq!(validated.namespace.to_string(), "default"); + } + + #[test] + fn test_validate_err_invalid_name() { + assert_validate_err( + |hive| hive.metadata.name = Some("UPPERCASE".to_string()), + ErrorDiscriminants::InvalidClusterName, + ); + } + + #[test] + fn test_validate_err_missing_namespace() { + assert_validate_err( + |hive| hive.metadata.namespace = None, + ErrorDiscriminants::MissingNamespace, + ); + } + + #[test] + fn test_validate_err_missing_uid() { + assert_validate_err( + |hive| hive.metadata.uid = None, + ErrorDiscriminants::MissingUid, + ); + } + + #[test] + fn test_validate_err_missing_metastore_role() { + assert_validate_err( + |hive| hive.spec.metastore = None, + ErrorDiscriminants::NoMetaStoreRole, + ); + } +} diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index 7e4edc63..09a33de1 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -30,7 +30,6 @@ use stackable_operator::{ schemars::{self, JsonSchema}, shared::time::Duration, status::condition::{ClusterCondition, HasStatusCondition}, - utils::cluster_info::KubernetesClusterInfo, versioned::versioned, }; use strum::{Display, EnumIter, EnumString, IntoEnumIterator}; @@ -239,31 +238,6 @@ impl v1alpha1::HiveCluster { } } - /// List all pods expected to form the cluster - /// - /// We try to predict the pods here rather than looking at the current cluster state in order to - /// avoid instance churn. 
- pub fn pods(&self) -> Result + '_, NoNamespaceError> { - let ns = self.metadata.namespace.clone().context(NoNamespaceSnafu)?; - Ok(self - .spec - .metastore - .iter() - .flat_map(|role| &role.role_groups) - // Order rolegroups consistently, to avoid spurious downstream rewrites - .collect::>() - .into_iter() - .flat_map(move |(rolegroup_name, rolegroup)| { - let rolegroup_ref = self.metastore_rolegroup_ref(rolegroup_name); - let ns = ns.clone(); - (0..rolegroup.replicas.unwrap_or(0)).map(move |i| PodRef { - namespace: ns.clone(), - role_group_service_name: rolegroup_ref.object_name(), - pod_name: format!("{}-{}", rolegroup_ref.object_name(), i), - }) - })) - } - pub fn role(&self, role_variant: &HiveRole) -> Result<&HiveRoleType, Error> { match role_variant { HiveRole::MetaStore => self.spec.metastore.as_ref(), @@ -383,7 +357,7 @@ pub struct HdfsConnection { pub config_map: String, } -#[derive(Display, EnumString, EnumIter)] +#[derive(Clone, Debug, Display, EnumString, EnumIter, Eq, Hash, Ord, PartialEq, PartialOrd)] #[strum(serialize_all = "camelCase")] pub enum HiveRole { #[strum(serialize = "metastore")] @@ -391,7 +365,7 @@ pub enum HiveRole { } impl HiveRole { - /// Metadata about a rolegroup + #[allow(dead_code)] pub fn rolegroup_ref( &self, hive: &v1alpha1::HiveCluster, @@ -591,30 +565,6 @@ pub struct HiveClusterStatus { pub conditions: Vec, } -#[derive(Debug, Snafu)] -#[snafu(display("object has no namespace associated"))] -pub struct NoNamespaceError; - -/// Reference to a single `Pod` that is a component of a [`HiveCluster`] -/// Used for service discovery. 
-pub struct PodRef { - pub namespace: String, - pub role_group_service_name: String, - pub pod_name: String, -} - -impl PodRef { - pub fn fqdn(&self, cluster_info: &KubernetesClusterInfo) -> String { - format!( - "{pod_name}.{service_name}.{namespace}.svc.{cluster_domain}", - pod_name = self.pod_name, - service_name = self.role_group_service_name, - namespace = self.namespace, - cluster_domain = cluster_info.cluster_domain - ) - } -} - #[cfg(test)] mod tests { use stackable_operator::versioned::test_utils::RoundtripTestData; diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs deleted file mode 100644 index c6956e16..00000000 --- a/rust/operator-binary/src/discovery.rs +++ /dev/null @@ -1,113 +0,0 @@ -use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_operator::{ - builder::{configmap::ConfigMapBuilder, meta::ObjectMetaBuilder}, - commons::product_image_selection::ResolvedProductImage, - crd::listener::v1alpha1::Listener, - k8s_openapi::api::core::v1::ConfigMap, - kube::{Resource, runtime::reflector::ObjectRef}, -}; - -use crate::{ - controller::build_recommended_labels, - crd::{HiveRole, v1alpha1}, - listener::build_listener_connection_string, -}; - -#[derive(Snafu, Debug)] -pub enum Error { - #[snafu(display("object is missing metadata to build owner reference {hive}"))] - ObjectMissingMetadataForOwnerRef { - source: stackable_operator::builder::meta::Error, - hive: ObjectRef, - }, - - #[snafu(display("could not build discovery config map for {obj_ref}"))] - DiscoveryConfigMap { - source: stackable_operator::builder::configmap::Error, - obj_ref: ObjectRef, - }, - #[snafu(display("invalid owner name for discovery ConfigMap"))] - InvalidOwnerNameForDiscoveryConfigMap, - - #[snafu(display("failed to build Metadata"))] - MetadataBuild { - source: stackable_operator::builder::meta::Error, - }, - #[snafu(display("failed to configure listener discovery configmap"))] - ListenerConfiguration { source: crate::listener::Error }, 
-} - -/// Builds discovery [`ConfigMap`]s for connecting to a [`v1alpha1::HiveCluster`] for all expected -/// scenarios. -pub async fn build_discovery_configmaps( - owner: &impl Resource, - hive: &v1alpha1::HiveCluster, - hive_role: HiveRole, - resolved_product_image: &ResolvedProductImage, - chroot: Option<&str>, - listener: Listener, -) -> Result, Error> { - let name = owner - .meta() - .name - .as_ref() - .context(InvalidOwnerNameForDiscoveryConfigMapSnafu)?; - - let discovery_configmaps = vec![build_discovery_configmap( - name, - owner, - hive, - hive_role, - resolved_product_image, - chroot, - listener, - )?]; - - Ok(discovery_configmaps) -} - -/// Build a discovery [`ConfigMap`] containing information about how to connect to a certain -/// [`v1alpha1::HiveCluster`]. -/// -/// Data is coming from the [`Listener`] objects. Connection string is only build by [`build_listener_connection_string`]. -fn build_discovery_configmap( - name: &str, - owner: &impl Resource, - hive: &v1alpha1::HiveCluster, - hive_role: HiveRole, - resolved_product_image: &ResolvedProductImage, - chroot: Option<&str>, - listener: Listener, -) -> Result { - let mut discovery_configmap = ConfigMapBuilder::new(); - - discovery_configmap.metadata( - ObjectMetaBuilder::new() - .name_and_namespace(hive) - .name(name) - .ownerreference_from_resource(owner, None, Some(true)) - .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { - hive: ObjectRef::from_obj(hive), - })? - .with_recommended_labels(&build_recommended_labels( - hive, - &resolved_product_image.app_version_label_value, - &hive_role.to_string(), - "discovery", - )) - .context(MetadataBuildSnafu)? 
- .build(), - ); - - discovery_configmap.add_data( - "HIVE".to_string(), - build_listener_connection_string(listener, &hive_role.to_string(), chroot) - .context(ListenerConfigurationSnafu)?, - ); - - discovery_configmap - .build() - .with_context(|_| DiscoveryConfigMapSnafu { - obj_ref: ObjectRef::from_obj(hive), - }) -} diff --git a/rust/operator-binary/src/framework.rs b/rust/operator-binary/src/framework.rs index 8bc3c995..1a98b8c3 100644 --- a/rust/operator-binary/src/framework.rs +++ b/rust/operator-binary/src/framework.rs @@ -21,16 +21,13 @@ use types::kubernetes::Uid; -#[allow(dead_code)] pub mod builder; #[allow(dead_code)] pub mod cluster_resources; #[allow(dead_code)] pub mod controller_utils; -#[allow(dead_code)] pub mod kvp; pub mod macros; -#[allow(dead_code)] pub mod product_logging; #[allow(dead_code)] pub mod role_group_utils; @@ -48,13 +45,11 @@ pub trait HasName { } /// Has a Kubernetes UID -#[allow(dead_code)] pub trait HasUid { fn to_uid(&self) -> Uid; } /// The name is a valid label value -#[allow(dead_code)] pub trait NameIsValidLabelValue { fn to_label_value(&self) -> String; } diff --git a/rust/operator-binary/src/framework/builder.rs b/rust/operator-binary/src/framework/builder.rs index a6530b5d..5d02a0b0 100644 --- a/rust/operator-binary/src/framework/builder.rs +++ b/rust/operator-binary/src/framework/builder.rs @@ -1,6 +1,4 @@ -#[allow(dead_code)] pub mod meta; -#[allow(dead_code)] pub mod pdb; #[allow(dead_code)] pub mod pod; diff --git a/rust/operator-binary/src/framework/builder/pdb.rs b/rust/operator-binary/src/framework/builder/pdb.rs index 46e29310..be149c8c 100644 --- a/rust/operator-binary/src/framework/builder/pdb.rs +++ b/rust/operator-binary/src/framework/builder/pdb.rs @@ -75,13 +75,29 @@ mod tests { type DynamicType = (); type Scope = (); - fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("HiveCluster") } - fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("hive.stackable.tech") } - fn version(_dt: 
&Self::DynamicType) -> Cow<'_, str> { Cow::from("v1alpha1") } - fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("hiveclusters") } + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("HiveCluster") + } + + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("hive.stackable.tech") + } + + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("v1alpha1") + } - fn meta(&self) -> &ObjectMeta { &self.object_meta } - fn meta_mut(&mut self) -> &mut ObjectMeta { &mut self.object_meta } + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("hiveclusters") + } + + fn meta(&self) -> &ObjectMeta { + &self.object_meta + } + + fn meta_mut(&mut self) -> &mut ObjectMeta { + &mut self.object_meta + } } impl HasName for Cluster { diff --git a/rust/operator-binary/src/framework/kvp/label.rs b/rust/operator-binary/src/framework/kvp/label.rs index 72120fde..d70641e2 100644 --- a/rust/operator-binary/src/framework/kvp/label.rs +++ b/rust/operator-binary/src/framework/kvp/label.rs @@ -100,10 +100,21 @@ mod tests { type DynamicType = (); type Scope = (); - fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("HiveCluster") } - fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("hive.stackable.tech") } - fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("v1alpha1") } - fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { Cow::from("hiveclusters") } + fn kind(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("HiveCluster") + } + + fn group(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("hive.stackable.tech") + } + + fn version(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("v1alpha1") + } + + fn plural(_dt: &Self::DynamicType) -> Cow<'_, str> { + Cow::from("hiveclusters") + } fn meta(&self) -> &ObjectMeta { &self.object_meta diff --git a/rust/operator-binary/src/framework/role_utils.rs b/rust/operator-binary/src/framework/role_utils.rs index cb009721..de61ac54 100644 --- 
a/rust/operator-binary/src/framework/role_utils.rs +++ b/rust/operator-binary/src/framework/role_utils.rs @@ -22,7 +22,7 @@ use super::{ }, }; -/// Variant of [`stackable_operator::role_utils::GenericProductSpecificCommonConfig`] that +/// Variant of `stackable_operator::role_utils::GenericCommonConfig` that /// implements [`Merge`] #[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct GenericProductSpecificCommonConfig {} @@ -289,9 +289,27 @@ mod tests { } #[rstest] - #[case("role-group", Some("role-group"), Some("role-group"), Some("role"), Some("default"))] - #[case("role-group", Some("role-group"), Some("role-group"), Some("role"), None)] - #[case("role-group", Some("role-group"), Some("role-group"), None, Some("default"))] + #[case( + "role-group", + Some("role-group"), + Some("role-group"), + Some("role"), + Some("default") + )] + #[case( + "role-group", + Some("role-group"), + Some("role-group"), + Some("role"), + None + )] + #[case( + "role-group", + Some("role-group"), + Some("role-group"), + None, + Some("default") + )] #[case("role-group", Some("role-group"), Some("role-group"), None, None)] #[case("role", Some("role"), None, Some("role"), Some("default"))] #[case("role", Some("role"), None, Some("role"), None)] diff --git a/rust/operator-binary/src/kerberos.rs b/rust/operator-binary/src/kerberos.rs index 6d309aa2..5ef3bddb 100644 --- a/rust/operator-binary/src/kerberos.rs +++ b/rust/operator-binary/src/kerberos.rs @@ -1,76 +1,10 @@ use std::collections::BTreeMap; use indoc::formatdoc; -use snafu::{ResultExt, Snafu}; -use stackable_operator::{ - builder::{ - self, - pod::{ - PodBuilder, - container::ContainerBuilder, - volume::{ - SecretOperatorVolumeSourceBuilder, SecretOperatorVolumeSourceBuilderError, - VolumeBuilder, - }, - }, - }, - commons::secret_class::SecretClassVolumeProvisionParts, - kube::ResourceExt, - utils::cluster_info::KubernetesClusterInfo, -}; +use stackable_operator::{kube::ResourceExt, 
utils::cluster_info::KubernetesClusterInfo}; use crate::crd::{HIVE_SITE_XML, HiveRole, STACKABLE_CONFIG_DIR, v1alpha1}; -#[derive(Snafu, Debug)] -#[allow(clippy::enum_variant_names)] // all variants have the same prefix: `Add` -pub enum Error { - #[snafu(display("failed to add Kerberos secret volume"))] - AddKerberosSecretVolume { - source: SecretOperatorVolumeSourceBuilderError, - }, - - #[snafu(display("failed to add needed volume"))] - AddVolume { source: builder::pod::Error }, - - #[snafu(display("failed to add needed volumeMount"))] - AddVolumeMount { - source: builder::pod::container::Error, - }, -} - -pub fn add_kerberos_pod_config( - hive: &v1alpha1::HiveCluster, - role: &HiveRole, - cb: &mut ContainerBuilder, - pb: &mut PodBuilder, -) -> Result<(), Error> { - if let Some(kerberos_secret_class) = hive.kerberos_secret_class() { - // Mount keytab - let kerberos_secret_operator_volume = SecretOperatorVolumeSourceBuilder::new( - kerberos_secret_class, - // We need both public (krb5.conf) and private (keytab) parts. 
- SecretClassVolumeProvisionParts::PublicPrivate, - ) - .with_service_scope(hive.name_any()) - .with_kerberos_service_name(role.kerberos_service_name()) - .build() - .context(AddKerberosSecretVolumeSnafu)?; - pb.add_volume( - VolumeBuilder::new("kerberos") - .ephemeral(kerberos_secret_operator_volume) - .build(), - ) - .context(AddVolumeSnafu)?; - cb.add_volume_mount("kerberos", "/stackable/kerberos") - .context(AddVolumeMountSnafu)?; - - // Needed env vars - cb.add_env_var("KRB5_CONFIG", "/stackable/kerberos/krb5.conf"); - } - - Ok(()) -} - pub fn kerberos_config_properties( hive: &v1alpha1::HiveCluster, hive_namespace: &str, diff --git a/rust/operator-binary/src/listener.rs b/rust/operator-binary/src/listener.rs index 6b503080..f24f6896 100644 --- a/rust/operator-binary/src/listener.rs +++ b/rust/operator-binary/src/listener.rs @@ -1,14 +1,7 @@ -use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_operator::{ - builder::meta::ObjectMetaBuilder, - commons::product_image_selection::ResolvedProductImage, - crd::listener::v1alpha1::{Listener, ListenerPort, ListenerSpec}, -}; +use snafu::{OptionExt, Snafu}; +use stackable_operator::crd::listener::v1alpha1::Listener; -use crate::{ - controller::build_recommended_labels, - crd::{HIVE_PORT, HIVE_PORT_NAME, HiveRole, v1alpha1}, -}; +use crate::crd::HIVE_PORT_NAME; // Listener volumes pub const LISTENER_VOLUME_NAME: &str = "listener"; @@ -19,14 +12,6 @@ pub const DEFAULT_LISTENER_CLASS: &str = "cluster-internal"; #[derive(Snafu, Debug)] pub enum Error { - #[snafu(display("object is missing metadata to build owner reference"))] - ObjectMissingMetadataForOwnerRef { - source: stackable_operator::builder::meta::Error, - }, - #[snafu(display("failed to build Metadata"))] - MetadataBuild { - source: stackable_operator::builder::meta::Error, - }, #[snafu(display("{role} listener has no adress"))] RoleListenerHasNoAddress { role: String }, #[snafu(display("could not find port [{port_name}] for rolegroup listener {role}"))] @@ 
-67,51 +52,6 @@ pub fn build_listener_connection_string( Ok(conn_str) } -// Designed to build a listener per role -// In case of Hive we expect only one role: Metastore -pub fn build_role_listener( - hive: &v1alpha1::HiveCluster, - resolved_product_image: &ResolvedProductImage, - hive_role: &HiveRole, - listener_class: &String, -) -> Result { - let metadata = ObjectMetaBuilder::new() - .name_and_namespace(hive) - .name(hive.role_listener_name(hive_role)) - .ownerreference_from_resource(hive, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&build_recommended_labels( - hive, - &resolved_product_image.app_version_label_value, - &hive_role.to_string(), - "none", - )) - .context(MetadataBuildSnafu)? - .build(); - - let spec = ListenerSpec { - class_name: Some(listener_class.to_owned()), - ports: Some(listener_ports()), - ..Default::default() - }; - - let listener = Listener { - metadata, - spec, - status: None, - }; - - Ok(listener) -} - -pub fn listener_ports() -> Vec { - vec![ListenerPort { - name: HIVE_PORT_NAME.to_owned(), - port: HIVE_PORT.into(), - protocol: Some("TCP".to_owned()), - }] -} - // used by crds to define a default listener_class name pub fn metastore_default_listener_class() -> String { DEFAULT_LISTENER_CLASS.to_owned() diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index 6e950154..90d4ad3f 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -41,13 +41,10 @@ mod command; mod config; mod controller; mod crd; -mod discovery; mod framework; mod kerberos; mod listener; -mod operations; mod product_logging; -mod service; mod webhooks; mod built_info { @@ -163,7 +160,7 @@ async fn main() -> anyhow::Result<()> { ) .graceful_shutdown_on(sigterm_watcher.handle()) .run( - controller::reconcile_hive, + controller::reconcile, controller::error_policy, Arc::new(controller::Ctx { client: client.clone(), diff --git 
a/rust/operator-binary/src/operations/graceful_shutdown.rs b/rust/operator-binary/src/operations/graceful_shutdown.rs deleted file mode 100644 index e54bd51e..00000000 --- a/rust/operator-binary/src/operations/graceful_shutdown.rs +++ /dev/null @@ -1,27 +0,0 @@ -use snafu::{ResultExt, Snafu}; -use stackable_operator::builder::pod::PodBuilder; - -use crate::crd::MetaStoreConfig; - -#[derive(Debug, Snafu)] -pub enum Error { - #[snafu(display("Failed to set terminationGracePeriod"))] - SetTerminationGracePeriod { - source: stackable_operator::builder::pod::Error, - }, -} - -pub fn add_graceful_shutdown_config( - merged_config: &MetaStoreConfig, - pod_builder: &mut PodBuilder, -) -> Result<(), Error> { - // This must be always set by the merge mechanism, as we provide a default value, - // users can not disable graceful shutdown. - if let Some(graceful_shutdown_timeout) = merged_config.graceful_shutdown_timeout { - pod_builder - .termination_grace_period(&graceful_shutdown_timeout) - .context(SetTerminationGracePeriodSnafu)?; - } - - Ok(()) -} diff --git a/rust/operator-binary/src/operations/mod.rs b/rust/operator-binary/src/operations/mod.rs deleted file mode 100644 index 92ca2ec7..00000000 --- a/rust/operator-binary/src/operations/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod graceful_shutdown; -pub mod pdb; diff --git a/rust/operator-binary/src/operations/pdb.rs b/rust/operator-binary/src/operations/pdb.rs deleted file mode 100644 index 18b1984a..00000000 --- a/rust/operator-binary/src/operations/pdb.rs +++ /dev/null @@ -1,63 +0,0 @@ -use snafu::{ResultExt, Snafu}; -use stackable_operator::{ - builder::pdb::PodDisruptionBudgetBuilder, client::Client, cluster_resources::ClusterResources, - commons::pdb::PdbConfig, kube::ResourceExt, -}; - -use crate::{ - OPERATOR_NAME, - controller::HIVE_CONTROLLER_NAME, - crd::{APP_NAME, HiveRole, v1alpha1}, -}; - -#[derive(Snafu, Debug)] -pub enum Error { - #[snafu(display("Cannot create PodDisruptionBudget for role [{role}]"))] - 
CreatePdb { - source: stackable_operator::builder::pdb::Error, - role: String, - }, - #[snafu(display("Cannot apply PodDisruptionBudget [{name}]"))] - ApplyPdb { - source: stackable_operator::cluster_resources::Error, - name: String, - }, -} - -pub async fn add_pdbs( - pdb: &PdbConfig, - hive: &v1alpha1::HiveCluster, - role: &HiveRole, - client: &Client, - cluster_resources: &mut ClusterResources<'_>, -) -> Result<(), Error> { - if !pdb.enabled { - return Ok(()); - } - let max_unavailable = pdb.max_unavailable.unwrap_or(match role { - HiveRole::MetaStore => max_unavailable_metastores(), - }); - let pdb = PodDisruptionBudgetBuilder::new_with_role( - hive, - APP_NAME, - &role.to_string(), - OPERATOR_NAME, - HIVE_CONTROLLER_NAME, - ) - .with_context(|_| CreatePdbSnafu { - role: role.to_string(), - })? - .with_max_unavailable(max_unavailable) - .build(); - let pdb_name = pdb.name_any(); - cluster_resources - .add(client, pdb) - .await - .with_context(|_| ApplyPdbSnafu { name: pdb_name })?; - - Ok(()) -} - -fn max_unavailable_metastores() -> u16 { - 1 -} diff --git a/rust/operator-binary/src/product_logging.rs b/rust/operator-binary/src/product_logging.rs index 30e9318b..0e86c676 100644 --- a/rust/operator-binary/src/product_logging.rs +++ b/rust/operator-binary/src/product_logging.rs @@ -1,6 +1,7 @@ use snafu::Snafu; use stackable_operator::{ builder::configmap::ConfigMapBuilder, + kube::Resource, memory::BinaryMultiple, product_logging::{ self, @@ -10,26 +11,19 @@ use stackable_operator::{ }; use crate::{ - controller::MAX_HIVE_LOG_FILES_SIZE, - crd::{Container, HIVE_METASTORE_LOG4J2_PROPERTIES, STACKABLE_LOG_DIR, v1alpha1}, + controller::validate::MAX_HIVE_LOG_FILES_SIZE, + crd::{Container, HIVE_METASTORE_LOG4J2_PROPERTIES, STACKABLE_LOG_DIR}, }; #[derive(Snafu, Debug)] pub enum Error { #[snafu(display("object has no namespace"))] ObjectHasNoNamespace, - #[snafu(display("failed to retrieve the ConfigMap [{cm_name}]"))] - ConfigMapNotFound { - source: 
stackable_operator::client::Error, - cm_name: String, - }, #[snafu(display("failed to retrieve the entry [{entry}] for ConfigMap [{cm_name}]"))] MissingConfigMapEntry { entry: &'static str, cm_name: String, }, - #[snafu(display("crd validation failure"))] - CrdValidationFailure { source: crate::crd::Error }, } type Result = std::result::Result; @@ -38,11 +32,14 @@ const CONSOLE_CONVERSION_PATTERN: &str = "%d{ISO8601} %5p [%t] %c{2}: %m%n"; const HIVE_LOG_FILE: &str = "hive.log4j2.xml"; /// Extend the role group ConfigMap with logging and Vector configurations -pub fn extend_role_group_config_map( - rolegroup: &RoleGroupRef, +pub fn extend_role_group_config_map( + rolegroup: &RoleGroupRef, logging: &Logging, cm_builder: &mut ConfigMapBuilder, -) -> Result<()> { +) -> Result<()> +where + T: Resource, +{ if let Some(ContainerLogConfig { choice: Some(ContainerLogConfigChoice::Automatic(log_config)), }) = logging.containers.get(&Container::Hive) diff --git a/rust/operator-binary/src/service.rs b/rust/operator-binary/src/service.rs deleted file mode 100644 index 2ef11f95..00000000 --- a/rust/operator-binary/src/service.rs +++ /dev/null @@ -1,148 +0,0 @@ -use snafu::{ResultExt, Snafu}; -use stackable_operator::{ - builder::meta::ObjectMetaBuilder, - commons::product_image_selection::ResolvedProductImage, - k8s_openapi::api::core::v1::{Service, ServicePort, ServiceSpec}, - kvp::{Annotations, Labels}, - role_utils::RoleGroupRef, -}; - -use crate::{ - controller::build_recommended_labels, - crd::{APP_NAME, HIVE_PORT, HIVE_PORT_NAME, METRICS_PORT, METRICS_PORT_NAME, v1alpha1}, -}; - -#[derive(Debug, Snafu)] -pub enum Error { - #[snafu(display("object is missing metadata to build owner reference"))] - ObjectMissingMetadataForOwnerRef { - source: stackable_operator::builder::meta::Error, - }, - #[snafu(display("failed to build Metadata"))] - MetadataBuild { - source: stackable_operator::builder::meta::Error, - }, - #[snafu(display("failed to build Labels"))] - LabelBuild { - 
source: stackable_operator::kvp::LabelError, - }, -} - -/// The rolegroup [`Service`] is a headless service that allows direct access to the instances of a certain rolegroup -/// -/// This is mostly useful for internal communication between peers, or for clients that perform client-side load balancing. -pub fn build_rolegroup_headless_service( - hive: &v1alpha1::HiveCluster, - resolved_product_image: &ResolvedProductImage, - rolegroup: &RoleGroupRef, -) -> Result { - let headless_service = Service { - metadata: ObjectMetaBuilder::new() - .name_and_namespace(hive) - .name(rolegroup.rolegroup_headless_service_name()) - .ownerreference_from_resource(hive, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&build_recommended_labels( - hive, - &resolved_product_image.app_version_label_value, - &rolegroup.role, - &rolegroup.role_group, - )) - .context(MetadataBuildSnafu)? - .build(), - spec: Some(ServiceSpec { - // Internal communication does not need to be exposed - type_: Some("ClusterIP".to_string()), - cluster_ip: Some("None".to_string()), - // Expecting same ports as on listener service, just as a headless, internal service - ports: Some(service_ports()), - selector: Some( - Labels::role_group_selector(hive, APP_NAME, &rolegroup.role, &rolegroup.role_group) - .context(LabelBuildSnafu)? 
- .into(), - ), - publish_not_ready_addresses: Some(true), - ..ServiceSpec::default() - }), - status: None, - }; - Ok(headless_service) -} - -/// The rolegroup metrics [`Service`] is a service that exposes metrics and a prometheus scraping label -pub fn build_rolegroup_metrics_service( - hive: &v1alpha1::HiveCluster, - resolved_product_image: &ResolvedProductImage, - rolegroup: &RoleGroupRef, -) -> Result { - let metrics_service = Service { - metadata: ObjectMetaBuilder::new() - .name_and_namespace(hive) - .name(rolegroup.rolegroup_metrics_service_name()) - .ownerreference_from_resource(hive, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(&build_recommended_labels( - hive, - &resolved_product_image.app_version_label_value, - &rolegroup.role, - &rolegroup.role_group, - )) - .context(MetadataBuildSnafu)? - .with_labels(prometheus_labels()) - .with_annotations(prometheus_annotations()) - .build(), - spec: Some(ServiceSpec { - // Internal communication does not need to be exposed - type_: Some("ClusterIP".to_string()), - cluster_ip: Some("None".to_string()), - ports: Some(metrics_ports()), - selector: Some( - Labels::role_group_selector(hive, APP_NAME, &rolegroup.role, &rolegroup.role_group) - .context(LabelBuildSnafu)? 
- .into(), - ), - publish_not_ready_addresses: Some(true), - ..ServiceSpec::default() - }), - status: None, - }; - Ok(metrics_service) -} - -fn metrics_ports() -> Vec { - vec![ServicePort { - name: Some(METRICS_PORT_NAME.to_string()), - port: METRICS_PORT.into(), - protocol: Some("TCP".to_string()), - ..ServicePort::default() - }] -} - -fn service_ports() -> Vec { - vec![ServicePort { - name: Some(HIVE_PORT_NAME.to_string()), - port: HIVE_PORT.into(), - protocol: Some("TCP".to_string()), - ..ServicePort::default() - }] -} - -/// Common labels for Prometheus -fn prometheus_labels() -> Labels { - Labels::try_from([("prometheus.io/scrape", "true")]).expect("should be a valid label") -} - -/// Common annotations for Prometheus -/// -/// These annotations can be used in a ServiceMonitor. -/// -/// see also -fn prometheus_annotations() -> Annotations { - Annotations::try_from([ - ("prometheus.io/path".to_owned(), "/metrics".to_owned()), - ("prometheus.io/port".to_owned(), METRICS_PORT.to_string()), - ("prometheus.io/scheme".to_owned(), "http".to_owned()), - ("prometheus.io/scrape".to_owned(), "true".to_owned()), - ]) - .expect("should be valid annotations") -}