From 4e048d398bb953bd322c593208661a464e8a778e Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 12 Apr 2026 12:34:07 +0000 Subject: [PATCH 01/24] Update autogenerated version to 26.3.8.4 and contributors --- cmake/autogenerated_versions.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 50f7dfe035ec..005fad76dc6a 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. -SET(VERSION_REVISION 54515) +SET(VERSION_REVISION 54516) SET(VERSION_MAJOR 26) SET(VERSION_MINOR 3) -SET(VERSION_PATCH 8) -SET(VERSION_GITHASH 177c6aa7da1703d851ce0d997dd76f035fa6940d) -SET(VERSION_DESCRIBE v26.3.8.1-lts) -SET(VERSION_STRING 26.3.8.1) +SET(VERSION_PATCH 9) +SET(VERSION_GITHASH f3c6e5a4d27c3997b2a91174752e44acedc51f74) +SET(VERSION_DESCRIBE v26.3.9.1-lts) +SET(VERSION_STRING 26.3.9.1) # end of autochange From e6d44b0d93815a30e70f5d8db6e4d8916a3b43b3 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 12 Apr 2026 17:23:04 +0000 Subject: [PATCH 02/24] Backport #100573 to 26.3: Fix Block structure mismatch exception in MaterializingCTETransform --- src/QueryPipeline/QueryPipelineBuilder.cpp | 1 + .../04057_materialized_cte_with_totals.reference | 16 ++++++++++++++++ .../04057_materialized_cte_with_totals.sql | 9 +++++++++ 3 files changed, 26 insertions(+) create mode 100644 tests/queries/0_stateless/04057_materialized_cte_with_totals.reference create mode 100644 tests/queries/0_stateless/04057_materialized_cte_with_totals.sql diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index a93c6d85f845..29d5e39bd7da 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -763,6 
+763,7 @@ void QueryPipelineBuilder::addMaterializingCTETransform( ) { checkInitializedAndNotCompleted(); + dropTotalsAndExtremes(); resize(1); auto transform = std::make_shared( diff --git a/tests/queries/0_stateless/04057_materialized_cte_with_totals.reference b/tests/queries/0_stateless/04057_materialized_cte_with_totals.reference new file mode 100644 index 000000000000..1d995f09031b --- /dev/null +++ b/tests/queries/0_stateless/04057_materialized_cte_with_totals.reference @@ -0,0 +1,16 @@ +1 1 4 4 +1 1 4 6 +1 1 6 4 +1 1 6 6 +1 2 4 4 +1 2 4 6 +1 2 6 4 +1 2 6 6 +2 1 4 4 +2 1 4 6 +2 1 6 4 +2 1 6 6 +2 2 4 4 +2 2 4 6 +2 2 6 4 +2 2 6 6 diff --git a/tests/queries/0_stateless/04057_materialized_cte_with_totals.sql b/tests/queries/0_stateless/04057_materialized_cte_with_totals.sql new file mode 100644 index 000000000000..dad50d274b86 --- /dev/null +++ b/tests/queries/0_stateless/04057_materialized_cte_with_totals.sql @@ -0,0 +1,9 @@ +-- Reproducer for a bug where MaterializingCTETransform didn't drop totals/extremes, +-- causing Block structure mismatch when uniting CTE pipelines. 
+SET enable_materialized_cte = 1; +SET enable_analyzer = 1; + +WITH + cte1 AS MATERIALIZED (SELECT sum(number) FROM numbers(3) GROUP BY number % 2 WITH TOTALS), + cte2 AS MATERIALIZED (SELECT sum(number) FROM numbers(5) GROUP BY number % 2 WITH TOTALS) +SELECT * FROM cte1 AS a, cte1 AS b, cte2 AS c, cte2 AS d ORDER BY ALL; From 891a5ef0fffaa0c4902455ec4b1f935a7fa29bbd Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 13 Apr 2026 07:00:57 +0000 Subject: [PATCH 03/24] Backport #101645 to 26.3: Fix exact subcolumn match priority over prefix match in `getSubcolumnData` --- src/DataTypes/IDataType.cpp | 15 ++++++++++++--- ...4076_tuple_json_dotted_subcolumn_bug.reference | 1 + .../04076_tuple_json_dotted_subcolumn_bug.sql | 9 +++++++++ 3 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/04076_tuple_json_dotted_subcolumn_bug.reference create mode 100644 tests/queries/0_stateless/04076_tuple_json_dotted_subcolumn_bug.sql diff --git a/src/DataTypes/IDataType.cpp b/src/DataTypes/IDataType.cpp index 0ac7c0c249d6..6b9b2c980472 100644 --- a/src/DataTypes/IDataType.cpp +++ b/src/DataTypes/IDataType.cpp @@ -158,6 +158,12 @@ std::unique_ptr IDataType::getSubcolumnData( bool throw_if_null) { std::unique_ptr res; + /// Track whether res was set by an exact name match, so that exact matches + /// always take priority over prefix (dynamic subcolumn) matches. + /// This matters when e.g. JSON has typed paths "a" (Array(JSON)) and "a.b" (Int64): + /// without this, the prefix match on "a" would fire first (sorted order) and + /// the exact match on "a.b" would be skipped because res is already set. 
+ bool res_from_exact_match = false; ISerialization::StreamCallback callback_with_data = [&](const auto & subpath) { @@ -168,15 +174,18 @@ std::unique_ptr IDataType::getSubcolumnData( { auto name = ISerialization::getSubcolumnNameForStream(subpath, prefix_len, false, initial_array_level); /// Create data from path only if it's requested subcolumn. - /// Use the first match to be consistent with ColumnsDescription::addSubcolumns + /// Use the first exact match to be consistent with ColumnsDescription::addSubcolumns /// which also keeps the first subcolumn when there are name collisions /// (e.g. "null" can match both Nullable's null-map and a Tuple element named "null"). - if (name == subcolumn_name && !res) + /// Exact matches always take priority over prefix matches regardless of iteration order. + if (name == subcolumn_name && !res_from_exact_match) { res = std::make_unique(ISerialization::createFromPath(subpath, prefix_len)); + res_from_exact_match = true; } /// Check if this subcolumn is a prefix of requested subcolumn and it can create dynamic subcolumns. - else if (subcolumn_name.starts_with(name + ".") && subpath[i].data.type && subpath[i].data.type->hasDynamicSubcolumnsData()) + /// Only use prefix matches when no exact match has been found. 
+ else if (!res_from_exact_match && subcolumn_name.starts_with(name + ".") && subpath[i].data.type && subpath[i].data.type->hasDynamicSubcolumnsData()) { auto dynamic_subcolumn_name = subcolumn_name.substr(name.size() + 1); auto dynamic_subcolumn_data = subpath[i].data.type->getDynamicSubcolumnData( diff --git a/tests/queries/0_stateless/04076_tuple_json_dotted_subcolumn_bug.reference b/tests/queries/0_stateless/04076_tuple_json_dotted_subcolumn_bug.reference new file mode 100644 index 000000000000..d81cc0710eb6 --- /dev/null +++ b/tests/queries/0_stateless/04076_tuple_json_dotted_subcolumn_bug.reference @@ -0,0 +1 @@ +42 diff --git a/tests/queries/0_stateless/04076_tuple_json_dotted_subcolumn_bug.sql b/tests/queries/0_stateless/04076_tuple_json_dotted_subcolumn_bug.sql new file mode 100644 index 000000000000..ab5b385e8440 --- /dev/null +++ b/tests/queries/0_stateless/04076_tuple_json_dotted_subcolumn_bug.sql @@ -0,0 +1,9 @@ +-- Regression test for https://github.com/ClickHouse/ClickHouse/issues/101271 +-- Selecting a Tuple element whose name contains a dot (e.g. `a.b`) caused a +-- server exception when another element named `a` has a JSON type, because the +-- prefix match on `a` would set `res` before the exact match on `a.b` could. 
+DROP TABLE IF EXISTS t_json_crash; +CREATE TABLE t_json_crash (t Tuple(a JSON, `a.b` UInt32)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t_json_crash VALUES (('{"b": 999}', 42)); +SELECT t.`a.b` FROM t_json_crash; +DROP TABLE IF EXISTS t_json_crash; From d38923a6b4c7b28e97038be3db7b72bbc781c48c Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 13 Apr 2026 07:02:33 +0000 Subject: [PATCH 04/24] Backport #99279 to 26.3: Check session settings in create queries when engine itself supports settings --- src/Interpreters/InterpreterSetQuery.cpp | 1 + .../integration/test_settings_constraints/test.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/src/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp index 7bc5e854b1cd..c91aca7d64cf 100644 --- a/src/Interpreters/InterpreterSetQuery.cpp +++ b/src/Interpreters/InterpreterSetQuery.cpp @@ -112,6 +112,7 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta String & name = it->name; if ((!features.supports_settings || !features.has_builtin_setting_fn(name)) && context_settings.has(name)) { + context_->checkSettingsConstraints(*it, SettingSource::QUERY); context_->setSetting(name, it->value); it = engine_settings->changes.erase(it); } diff --git a/tests/integration/test_settings_constraints/test.py b/tests/integration/test_settings_constraints/test.py index c2358c1d57d2..e5520870ad55 100644 --- a/tests/integration/test_settings_constraints/test.py +++ b/tests/integration/test_settings_constraints/test.py @@ -225,6 +225,21 @@ def test_disallowed_constraint_merge_tree(started_cluster): instance.query("DROP TABLE IF EXISTS test") +def test_create_table_query_setting_constraints(started_cluster): + """Test that query-level settings passed in CREATE TABLE's engine SETTINGS clause + are validated against setting constraints (not bypassing them).""" + + # The settings in CREATE TABLE ... 
SETTINGS should be rejected: + assert "should not be changed" in instance.query_and_get_error( + "CREATE TABLE test_constraint (x Int64) ENGINE = MergeTree ORDER BY x SETTINGS force_index_by_date = 1" + ) + + # Also test with max_memory_usage min constraint via CREATE TABLE SETTINGS + assert "shouldn't be less than" in instance.query_and_get_error( + "CREATE TABLE test_constraint (x Int64) ENGINE = MergeTree ORDER BY x SETTINGS max_memory_usage = 1" + ) + + def assert_query_settings( instance, query, settings, result=None, exception=None, user=None ): From dffc17ad4dcfbeb3897c9bad92216def7d2fc30c Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 13 Apr 2026 10:47:52 +0000 Subject: [PATCH 05/24] Backport #101385 to 26.3: Fix data race on storage_id in IStorage::getDependentViewsByColumn --- src/Storages/IStorage.cpp | 5 +++-- src/Storages/IStorage.h | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index ca77565cf5c0..b7adabd74f2e 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -356,7 +356,8 @@ Names IStorage::getAllRegisteredNames() const NameDependencies IStorage::getDependentViewsByColumn(ContextPtr context) const { NameDependencies name_deps; - auto view_ids = DatabaseCatalog::instance().getDependentViews(storage_id); + auto current_storage_id = getStorageID(); + auto view_ids = DatabaseCatalog::instance().getDependentViews(current_storage_id); for (const auto & view_id : view_ids) { auto view = DatabaseCatalog::instance().getTable(view_id, context); @@ -368,7 +369,7 @@ NameDependencies IStorage::getDependentViewsByColumn(ContextPtr context) const { auto interpreter = InterpreterSelectQueryAnalyzer(select_query, context, SelectQueryOptions{}.noModify()); auto query_tree = interpreter.getQueryTree(); - required_columns = collectSelectedColumnsFromTable(query_tree, storage_id, context); + required_columns = collectSelectedColumnsFromTable(query_tree, 
current_storage_id, context); } else { diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index f0084630324d..c166225ea058 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -310,7 +310,7 @@ class IStorage : public std::enable_shared_from_this, public TypePromo virtual void addInferredEngineArgsToCreateQuery(ASTs & /*args*/, const ContextPtr & /*context*/) const {} private: - StorageID storage_id; + StorageID storage_id TSA_GUARDED_BY(id_mutex); mutable std::mutex id_mutex; From 5f0dc360652f32ec1e510310c887218a31a7b235 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 13 Apr 2026 12:39:15 +0000 Subject: [PATCH 06/24] Backport #102397 to 26.3: Fix Alias tables without a target table with Database Replicated --- src/Storages/StorageAlias.h | 24 +++++++++++++-- .../test_replicated_database/test.py | 29 +++++++++++++++++++ 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/src/Storages/StorageAlias.h b/src/Storages/StorageAlias.h index 6341aa01f073..5d0a2e2e59eb 100644 --- a/src/Storages/StorageAlias.h +++ b/src/Storages/StorageAlias.h @@ -105,8 +105,20 @@ class StorageAlias final : public IStorage, WithContext void updateExternalDynamicMetadataIfExists(ContextPtr local_context) override; void checkTableCanBeDropped(ContextPtr /*query_context*/) const override {} - StorageInMemoryMetadata getInMemoryMetadata() const override { return getTargetTable()->getInMemoryMetadata(); } - StorageMetadataPtr getInMemoryMetadataPtr(bool bypass_metadata_cache) const override { return getTargetTable()->getInMemoryMetadataPtr(bypass_metadata_cache); } + StorageInMemoryMetadata getInMemoryMetadata() const override + { + auto target = tryGetTargetTable(); + if (!target) + return IStorage::getInMemoryMetadata(); + return target->getInMemoryMetadata(); + } + StorageMetadataPtr getInMemoryMetadataPtr(bool bypass_metadata_cache) const override + { + auto target = tryGetTargetTable(); + if (!target) + return 
IStorage::getInMemoryMetadataPtr(bypass_metadata_cache); + return target->getInMemoryMetadataPtr(bypass_metadata_cache); + } std::optional tryGetInMemoryMetadataPtr() const override { auto target = tryGetTargetTable(); @@ -188,7 +200,13 @@ class StorageAlias final : public IStorage, WithContext return target->getSerializationHints(); } - ActionLock getActionLock(StorageActionBlockType type) override { return getTargetTable()->getActionLock(type); } + ActionLock getActionLock(StorageActionBlockType type) override + { + auto target = tryGetTargetTable(); + if (!target) + return {}; + return target->getActionLock(type); + } TableLockHolder lockForShare(const String & query_id, const std::chrono::milliseconds & acquire_timeout) const { return getTargetTable()->lockForShare(query_id, Poco::Timespan(acquire_timeout.count() * 1000)); } TableLockHolder tryLockForShare(const String & query_id, const std::chrono::milliseconds & acquire_timeout) const diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index daa3d4069dba..1315309b55d3 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -2015,3 +2015,32 @@ def test_ignore_cluster_name_setting(started_cluster): # Cleanup for node in [main_node, dummy_node]: node.query(f"DROP DATABASE IF EXISTS {db_name} SYNC") + + +def test_alias_with_dropped_target(started_cluster): + db_name = "test_alias_dropped" + + main_node.query(f"DROP DATABASE IF EXISTS {db_name} SYNC") + dummy_node.query(f"DROP DATABASE IF EXISTS {db_name} SYNC") + + main_node.query( + f""" + CREATE DATABASE {db_name} ENGINE = Replicated('/clickhouse/databases/{db_name}', '{{shard}}', '{{replica}}'); + SET allow_experimental_alias_table_engine = 1; + CREATE TABLE {db_name}.base_table (id UInt32, value String) ENGINE = MergeTree ORDER BY id; + CREATE TABLE {db_name}.alias_table ENGINE = Alias('{db_name}', 'base_table'); + DROP TABLE 
{db_name}.base_table; + """ + ) + + dummy_node.query( + f""" + DROP DATABASE IF EXISTS {db_name} SYNC; + CREATE DATABASE {db_name} ENGINE = Replicated('/clickhouse/databases/{db_name}', '{{shard}}', '{{replica}}'); + SYSTEM SYNC DATABASE REPLICA {db_name}; + """ + ) + + # Cleanup + for node in [main_node, dummy_node]: + node.query(f"DROP DATABASE IF EXISTS {db_name} SYNC") From 5263605493a61f168ad6c6cb350a55ab69c6762e Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 13 Apr 2026 13:47:49 +0000 Subject: [PATCH 07/24] Backport #102408 to 26.3: Revert "Revert "Fix polaris catalog with azure"" --- src/Databases/DataLake/DatabaseDataLake.cpp | 5 + .../DataLake/DatabaseDataLakeSettings.cpp | 1 + src/Databases/DataLake/ICatalog.cpp | 74 ++++++++- src/Databases/DataLake/ICatalog.h | 14 ++ .../tests/gtest_azure_abfss_parsing.cpp | 154 ++++++++++++++++++ 5 files changed, 242 insertions(+), 6 deletions(-) diff --git a/src/Databases/DataLake/DatabaseDataLake.cpp b/src/Databases/DataLake/DatabaseDataLake.cpp index d140a03d6fa0..2bdb55ba9886 100644 --- a/src/Databases/DataLake/DatabaseDataLake.cpp +++ b/src/Databases/DataLake/DatabaseDataLake.cpp @@ -79,6 +79,7 @@ namespace DatabaseDataLakeSetting extern const DatabaseDataLakeSettingsString google_adc_refresh_token; extern const DatabaseDataLakeSettingsString google_adc_quota_project_id; extern const DatabaseDataLakeSettingsString google_adc_credentials_file; + extern const DatabaseDataLakeSettingsBool polaris_style_paths; } namespace Setting @@ -497,6 +498,8 @@ StoragePtr DatabaseDataLake::tryGetTableImpl(const String & name, ContextPtr con { auto catalog = getCatalog(); auto table_metadata = DataLake::TableMetadata().withSchema().withLocation().withDataLakeSpecificProperties(); + if (settings[DatabaseDataLakeSetting::polaris_style_paths]) + table_metadata.withPolarisStyleAbfssPaths(); /// This is added to test that lightweight queries like 'SHOW TABLES' dont end up fetching the table 
fiu_do_on(FailPoints::lightweight_show_tables, @@ -859,6 +862,8 @@ ASTPtr DatabaseDataLake::getCreateTableQueryImpl( { auto catalog = getCatalog(); auto table_metadata = DataLake::TableMetadata().withLocation().withSchema(); + if (settings[DatabaseDataLakeSetting::polaris_style_paths]) + table_metadata.withPolarisStyleAbfssPaths(); const auto [namespace_name, table_name] = DataLake::parseTableName(name); diff --git a/src/Databases/DataLake/DatabaseDataLakeSettings.cpp b/src/Databases/DataLake/DatabaseDataLakeSettings.cpp index 2eb2e071787b..85826a34c74c 100644 --- a/src/Databases/DataLake/DatabaseDataLakeSettings.cpp +++ b/src/Databases/DataLake/DatabaseDataLakeSettings.cpp @@ -44,6 +44,7 @@ namespace ErrorCodes DECLARE(String, google_adc_credentials_file, "", "Deprecated setting, will throw an exception if used", 0) \ DECLARE(String, dlf_access_key_id, "", "Access id of DLF token for Paimon REST Catalog", 0) \ DECLARE(String, dlf_access_key_secret, "", "Access secret of DLF token for Paimon REST Catalog", 0) \ + DECLARE(Bool, polaris_style_paths, true, "Enable Polaris/ADLS Gen2 path convention: the container name is prepended to the path in ABFSS locations (e.g. abfss://c@account/c/actual/path). When enabled, the redundant container prefix is stripped when building Azure HTTPS URLs. 
Disable if a real directory inside the container has the same name as the container itself.", 0) \ #define LIST_OF_DATABASE_ICEBERG_SETTINGS(M, ALIAS) \ DATABASE_ICEBERG_RELATED_SETTINGS(M, ALIAS) \ diff --git a/src/Databases/DataLake/ICatalog.cpp b/src/Databases/DataLake/ICatalog.cpp index 85d701d86840..9f9fe6af6b7a 100644 --- a/src/Databases/DataLake/ICatalog.cpp +++ b/src/Databases/DataLake/ICatalog.cpp @@ -118,6 +118,16 @@ void TableMetadata::setLocation(const std::string & location_) /// Azure ABFSS format: extract container (before @) and account (after @) bucket = bucket_part.substr(0, at_pos); azure_account_with_suffix = bucket_part.substr(at_pos + 1); + + /// Some catalogs (e.g. Apache Polaris) follow the ADLS Gen2 filesystem convention + /// of including the container name as the first segment of the path in abfss:// locations, + /// e.g. abfss://container@account.dfs.core.windows.net/container/actual/path. + /// We record this as a flag so that `constructLocation` and `getMetadataLocation` can + /// strip the redundant prefix when needed, while `path` itself is left intact so that + /// `getLocation` remains a round-trip of `setLocation`. + if (polaris_style_abfss_paths && path.starts_with(bucket + "/")) + abfss_has_container_path_prefix = true; + LOG_TEST(getLogger("TableMetadata"), "Parsed Azure location - container: {}, account: {}, path: {}", bucket, azure_account_with_suffix, path); @@ -166,10 +176,15 @@ std::string TableMetadata::constructLocation(const std::string & endpoint_) cons if (!azure_account_with_suffix.empty()) { /// Azure storage - endpoint should be https://.dfs.core.windows.net - /// Construct the full URL with container and path + /// Construct the full URL with container and path. + /// When the path carries a Polaris-style redundant container prefix (e.g. "c/actual/path" + /// for container "c"), strip it before prepending the container, so we don't double it. 
+ std::string_view effective_path = path; + if (abfss_has_container_path_prefix && effective_path.starts_with(bucket + "/")) + effective_path = effective_path.substr(bucket.size() + 1); if (location.ends_with(bucket)) - return std::filesystem::path(location) / path / ""; - return std::filesystem::path(location) / bucket / path / ""; + return std::filesystem::path(location) / effective_path / ""; + return std::filesystem::path(location) / bucket / effective_path / ""; } if (location.ends_with(bucket)) @@ -258,12 +273,59 @@ std::string TableMetadata::getMetadataLocation(const std::string & iceberg_metad metadata_location = metadata_location.substr(storage_type_str.size()); if (data_location.starts_with(storage_type_str)) data_location = data_location.substr(storage_type_str.size()); - else if (!endpoint.empty() && data_location.starts_with(endpoint)) - data_location = data_location.substr(endpoint.size()); + else if (!endpoint.empty()) + { + std::string normalized_endpoint = endpoint; + if (normalized_endpoint.ends_with('/')) + normalized_endpoint.pop_back(); + if (data_location.starts_with(normalized_endpoint)) + { + data_location = data_location.substr(normalized_endpoint.size()); + if (azure_account_with_suffix.empty() && !data_location.empty() && data_location.front() == '/') + data_location = data_location.substr(1); + } + } + + /// For Azure ABFSS locations we need to reconcile two different formats: + /// - metadata_location (from catalog): "container@account.host/path/..." + /// - data_location (with endpoint set): "/container/path/" (HTTPS path after endpoint stripped) + /// When no endpoint is set both sides are in ABFSS authority form and compare directly. 
+ if (!azure_account_with_suffix.empty() && !bucket.empty()) + { + /// The host part after stripping the ABFSS protocol is: bucket@azure_account_with_suffix/ + std::string azure_host_prefix = bucket + "@" + azure_account_with_suffix + "/"; + + /// For Polaris-style paths: the container name is repeated as the first path segment + /// (e.g. abfss://c@account/c/actual/path). Strip that redundant prefix from both sides + /// before the comparison below so we identify the correct relative path. + /// This runs for both with-endpoint and without-endpoint cases. + if (abfss_has_container_path_prefix) + { + auto strip_container = [&](std::string & location_str) + { + if (location_str.starts_with(azure_host_prefix)) + { + std::string_view after_host = std::string_view(location_str).substr(azure_host_prefix.size()); + if (after_host.starts_with(bucket + "/")) + { + location_str = std::string(azure_host_prefix) + std::string(after_host.substr(bucket.size() + 1)); + } + } + }; + strip_container(metadata_location); + strip_container(data_location); + } + + /// With endpoint: data_location is now in HTTPS path form ("/container/path/"). + /// Convert metadata_location from ABFSS authority form ("container@account.host/path") + /// to the matching HTTPS path form ("/container/path") so the prefix comparison works. + if (!endpoint.empty() && metadata_location.starts_with(azure_host_prefix)) + metadata_location = "/" + bucket + "/" + metadata_location.substr(azure_host_prefix.size()); + } if (metadata_location.starts_with(data_location)) { - size_t remove_slash = metadata_location[data_location.size()] == '/' ? 1 : 0; + size_t remove_slash = (metadata_location.size() > data_location.size() && metadata_location[data_location.size()] == '/') ? 
1 : 0; metadata_location = metadata_location.substr(data_location.size() + remove_slash); } } diff --git a/src/Databases/DataLake/ICatalog.h b/src/Databases/DataLake/ICatalog.h index e3333c1c58cd..86fe9e7d8c9a 100644 --- a/src/Databases/DataLake/ICatalog.h +++ b/src/Databases/DataLake/ICatalog.h @@ -33,6 +33,10 @@ class TableMetadata TableMetadata & withSchema() { with_schema = true; return *this; } TableMetadata & withStorageCredentials() { with_storage_credentials = true; return *this; } TableMetadata & withDataLakeSpecificProperties() { with_datalake_specific_metadata = true; return *this; } + /// Enable Polaris/ADLS Gen2 convention: when `setLocation` sees an ABFSS URL where the + /// first path segment equals the container name, treat it as a redundant prefix and record + /// it so that `constructLocation` and `getMetadataLocation` can strip it. + TableMetadata & withPolarisStyleAbfssPaths() { polaris_style_abfss_paths = true; return *this; } bool hasLocation() const; bool hasSchema() const; @@ -93,6 +97,16 @@ class TableMetadata /// For Azure ABFSS URLs: stores the account with suffix (e.g., "account.dfs.core.windows.net") /// This is extracted from URLs like: abfss://container@account.dfs.core.windows.net/path std::string azure_account_with_suffix; + /// True when `setLocation` detected that the ABFSS path starts with the container name + /// as a redundant first segment — a convention used by some catalogs (e.g. Apache Polaris / + /// ADLS Gen2 filesystem paths). + /// Example: abfss://c@account.dfs.core.windows.net/c/actual/path — `c` appears in both + /// the authority and the first path segment. + /// When set, `constructLocation` and `getMetadataLocation` strip that prefix when building + /// Azure HTTPS URLs or comparing metadata-file prefixes, but `path` itself is left intact so + /// that `getLocation` remains a round-trip of `setLocation`. 
+ bool polaris_style_abfss_paths = false; + bool abfss_has_container_path_prefix = false; /// Endpoint is set and used in case we have non-AWS storage implementation, for example, Minio. /// Also not all catalogs support non-AWS storages. std::string endpoint; diff --git a/src/Databases/DataLake/tests/gtest_azure_abfss_parsing.cpp b/src/Databases/DataLake/tests/gtest_azure_abfss_parsing.cpp index 84662b11b08e..d36a23544e4e 100644 --- a/src/Databases/DataLake/tests/gtest_azure_abfss_parsing.cpp +++ b/src/Databases/DataLake/tests/gtest_azure_abfss_parsing.cpp @@ -92,6 +92,20 @@ TEST_F(AzureAbfssParsingTest, TableMetadataSetLocationS3) EXPECT_EQ(location, "s3://mybucket/path/to/table"); } +TEST_F(AzureAbfssParsingTest, TableMetadataGetMetadataLocationS3WithHttpEndpoint) +{ + TableMetadata metadata; + metadata.withLocation(); + metadata.setLocation("s3://warehouse-rest/data/testns/testtable"); + metadata.setEndpoint("http://minio:9000"); + + EXPECT_EQ(metadata.getLocation(), "http://minio:9000/warehouse-rest/data/testns/testtable/"); + + const std::string metadata_file = + "s3://warehouse-rest/data/testns/testtable/metadata/v1.metadata.json"; + EXPECT_EQ(metadata.getMetadataLocation(metadata_file), "metadata/v1.metadata.json"); +} + TEST_F(AzureAbfssParsingTest, TableMetadataSetLocationInvalidFormat) { TableMetadata metadata; @@ -112,4 +126,144 @@ TEST_F(AzureAbfssParsingTest, TableMetadataSetLocationMissingPath) }, DB::Exception); } +TEST_F(AzureAbfssParsingTest, TableMetadataSetLocationNonPolarisContainerInPath) +{ + const std::string location = "abfss://c@account.dfs.core.windows.net/c/table"; + + TableMetadata metadata; + metadata.withLocation(); + metadata.setLocation(location); + + EXPECT_EQ(metadata.getLocation(), location); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataSetLocationNonPolarisContainerInPathWithEndpoint) +{ + TableMetadata metadata; + metadata.withLocation().withPolarisStyleAbfssPaths(); + 
metadata.setLocation("abfss://c@account.dfs.core.windows.net/c/table"); + metadata.setEndpoint("https://account.dfs.core.windows.net"); + + EXPECT_EQ(metadata.getLocation(), "https://account.dfs.core.windows.net/c/table/"); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataGetMetadataLocationNonPolarisContainerInPath) +{ + TableMetadata metadata; + metadata.withLocation(); + metadata.setLocation("abfss://c@account.dfs.core.windows.net/c/table"); + + const std::string metadata_file = + "abfss://c@account.dfs.core.windows.net/c/table/metadata/v1.metadata.json"; + EXPECT_EQ(metadata.getMetadataLocation(metadata_file), "metadata/v1.metadata.json"); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataGetMetadataLocationPolarisStyle) +{ + TableMetadata metadata; + metadata.withLocation().withPolarisStyleAbfssPaths(); + metadata.setLocation("abfss://mycontainer@mystorageaccount.dfs.core.windows.net/mycontainer/actual/path"); + + const std::string metadata_file = + "abfss://mycontainer@mystorageaccount.dfs.core.windows.net/mycontainer/actual/path/metadata/v1.metadata.json"; + EXPECT_EQ(metadata.getMetadataLocation(metadata_file), "metadata/v1.metadata.json"); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataSetLocationPolarisStyle) +{ + const std::string location = "abfss://mycontainer@mystorageaccount.dfs.core.windows.net/mycontainer/actual/path"; + + TableMetadata metadata; + metadata.withLocation().withPolarisStyleAbfssPaths(); + metadata.setLocation(location); + + /// `getLocation` without endpoint is always a round-trip regardless of the Polaris flag. 
+ EXPECT_EQ(metadata.getLocation(), location); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataSetLocationPolarisStyleWithEndpoint) +{ + TableMetadata metadata; + metadata.withLocation().withPolarisStyleAbfssPaths(); + metadata.setLocation("abfss://mycontainer@mystorageaccount.dfs.core.windows.net/mycontainer/actual/path"); + metadata.setEndpoint("https://mystorageaccount.dfs.core.windows.net"); + + EXPECT_EQ( + metadata.getLocation(), + "https://mystorageaccount.dfs.core.windows.net/mycontainer/actual/path/"); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataGetMetadataLocationPolarisStyleWithEndpoint) +{ + TableMetadata metadata; + metadata.withLocation().withPolarisStyleAbfssPaths(); + metadata.setLocation("abfss://mycontainer@account.dfs.core.windows.net/mycontainer/actual/path"); + metadata.setEndpoint("https://account.dfs.core.windows.net"); + + const std::string metadata_file = + "abfss://mycontainer@account.dfs.core.windows.net/mycontainer/actual/path/metadata/v1.metadata.json"; + EXPECT_EQ(metadata.getMetadataLocation(metadata_file), "metadata/v1.metadata.json"); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataGetMetadataLocationEqualStrings) +{ + TableMetadata metadata; + metadata.withLocation(); + metadata.setLocation("abfss://c@account.dfs.core.windows.net/c/table"); + + const std::string metadata_file = "abfss://c@account.dfs.core.windows.net/c/table"; + EXPECT_EQ(metadata.getMetadataLocation(metadata_file), ""); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataGetMetadataLocationPolarisStyleEndpointTrailingSlash) +{ + TableMetadata metadata_no_slash; + metadata_no_slash.withLocation().withPolarisStyleAbfssPaths(); + metadata_no_slash.setLocation("abfss://mycontainer@account.dfs.core.windows.net/mycontainer/actual/path"); + metadata_no_slash.setEndpoint("https://account.dfs.core.windows.net"); + + TableMetadata metadata_with_slash; + metadata_with_slash.withLocation().withPolarisStyleAbfssPaths(); + 
metadata_with_slash.setLocation("abfss://mycontainer@account.dfs.core.windows.net/mycontainer/actual/path"); + metadata_with_slash.setEndpoint("https://account.dfs.core.windows.net/"); + + const std::string metadata_file = + "abfss://mycontainer@account.dfs.core.windows.net/mycontainer/actual/path/metadata/v1.metadata.json"; + + EXPECT_EQ(metadata_no_slash.getMetadataLocation(metadata_file), "metadata/v1.metadata.json"); + EXPECT_EQ(metadata_with_slash.getMetadataLocation(metadata_file), "metadata/v1.metadata.json"); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataContainerNamedDirNotStrippedWithoutPolarisFlag) +{ + TableMetadata metadata; + metadata.withLocation(); + metadata.setLocation("abfss://mycontainer@account.dfs.core.windows.net/mycontainer/data/table"); + metadata.setEndpoint("https://account.dfs.core.windows.net"); + + EXPECT_EQ( + metadata.getLocation(), + "https://account.dfs.core.windows.net/mycontainer/mycontainer/data/table/"); + + const std::string metadata_file = + "abfss://mycontainer@account.dfs.core.windows.net/mycontainer/data/table/metadata/v1.metadata.json"; + EXPECT_EQ(metadata.getMetadataLocation(metadata_file), "metadata/v1.metadata.json"); +} + +TEST_F(AzureAbfssParsingTest, TableMetadataContainerNamedDirStrippedWithPolarisFlag) +{ + TableMetadata metadata; + metadata.withLocation().withPolarisStyleAbfssPaths(); + metadata.setLocation("abfss://mycontainer@account.dfs.core.windows.net/mycontainer/data/table"); + metadata.setEndpoint("https://account.dfs.core.windows.net"); + + EXPECT_EQ( + metadata.getLocation(), + "https://account.dfs.core.windows.net/mycontainer/data/table/"); + + const std::string metadata_file = + "abfss://mycontainer@account.dfs.core.windows.net/mycontainer/data/table/metadata/v1.metadata.json"; + EXPECT_EQ(metadata.getMetadataLocation(metadata_file), "metadata/v1.metadata.json"); +} + } From 4f92428e2d0d1623ea7a83bc6e3cbd87f047e0b5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 13 Apr 2026 21:29:47 
+0000 Subject: [PATCH 08/24] Backport #100837 to 26.3: Try fixing use-after-free in RemoteQueryExecutor --- src/Processors/QueryPlan/ReadFromRemote.cpp | 3 ++- src/QueryPipeline/RemoteQueryExecutor.cpp | 9 +++++++-- src/QueryPipeline/RemoteQueryExecutor.h | 5 ++++- src/Storages/IStorageCluster.cpp | 3 ++- src/Storages/StorageDistributed.cpp | 8 ++++++-- 5 files changed, 21 insertions(+), 7 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromRemote.cpp b/src/Processors/QueryPlan/ReadFromRemote.cpp index 2d0c4df7941a..883b28126b93 100644 --- a/src/Processors/QueryPlan/ReadFromRemote.cpp +++ b/src/Processors/QueryPlan/ReadFromRemote.cpp @@ -631,7 +631,8 @@ void ReadFromRemote::addLazyPipe( my_scalars["_shard_num"] = Block{ {DataTypeUInt32().createColumnConst(1, my_shard.shard_info.shard_num), std::make_shared(), "_shard_num"}}; auto remote_query_executor = std::make_shared( - std::move(connections), query_string, header, my_context, my_throttler, my_scalars, my_external_tables, stage_to_use, my_shard.query_plan); + std::move(connections), query_string, header, my_context, my_throttler, my_scalars, my_external_tables, stage_to_use, + my_shard.query_plan, /*extension=*/std::nullopt, my_shard.shard_info.pool); auto pipe = createRemoteSourcePipe( remote_query_executor, add_agg_info, add_totals, add_extremes, async_read, async_query_sending, parallel_marshalling_threads); diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 33da62f0d394..3ff9ef0a9827 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -185,10 +185,15 @@ RemoteQueryExecutor::RemoteQueryExecutor( const Tables & external_tables_, QueryProcessingStage::Enum stage_, std::shared_ptr query_plan_, - std::optional extension_) + std::optional extension_, + ConnectionPoolWithFailoverPtr pool) : RemoteQueryExecutor(query_, header_, context_, scalars_, external_tables_, stage_, std::move(query_plan_), 
extension_) { - create_connections = [this, connections_, throttler, extension_](AsyncCallback) mutable + /// Capture `pool` in the lambda to prevent the connection pool from being destroyed + /// while entries are still in use. The Entry objects hold raw references (via PoolEntryHelper) + /// back to the pool's internal PooledObject and PoolBase structures, so the pool must + /// outlive all Entry objects. + create_connections = [this, connections_, throttler, extension_, pool](AsyncCallback) mutable { auto res = std::make_unique(std::move(connections_), context, throttler); if (extension_ && extension_->replica_info) diff --git a/src/QueryPipeline/RemoteQueryExecutor.h b/src/QueryPipeline/RemoteQueryExecutor.h index 3e2a83105570..e57aad64813b 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.h +++ b/src/QueryPipeline/RemoteQueryExecutor.h @@ -86,6 +86,8 @@ class RemoteQueryExecutor std::optional extension_ = std::nullopt); /// Accepts several connections already taken from pool. + /// The optional `pool` parameter keeps the connection pool alive while entries are in use, + /// preventing use-after-free when the pool would otherwise be destroyed before the entries. RemoteQueryExecutor( std::vector && connections_, const String & query_, @@ -96,7 +98,8 @@ class RemoteQueryExecutor const Tables & external_tables_ = Tables(), QueryProcessingStage::Enum stage_ = QueryProcessingStage::Complete, std::shared_ptr query_plan_ = nullptr, - std::optional extension_ = std::nullopt); + std::optional extension_ = std::nullopt, + ConnectionPoolWithFailoverPtr pool = nullptr); /// Takes a pool and gets one or several connections from it. 
RemoteQueryExecutor( diff --git a/src/Storages/IStorageCluster.cpp b/src/Storages/IStorageCluster.cpp index c6c69c0f21bc..f2853ec8c0e7 100644 --- a/src/Storages/IStorageCluster.cpp +++ b/src/Storages/IStorageCluster.cpp @@ -232,7 +232,8 @@ void ReadFromCluster::initializePipeline(QueryPipelineBuilder & pipeline, const Tables(), processed_stage, nullptr, - RemoteQueryExecutor::Extension{.task_iterator = extension->task_iterator, .replica_info = std::move(replica_info)}); + RemoteQueryExecutor::Extension{.task_iterator = extension->task_iterator, .replica_info = std::move(replica_info)}, + shard_info.pool); remote_query_executor->setLogger(log); Pipe pipe{std::make_shared( diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 155587764a75..b073c6e7ae7a 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -1199,7 +1199,10 @@ std::optional StorageDistributed::distributedWriteBetweenDistribu /// INSERT SELECT query returns empty block auto remote_query_executor - = std::make_shared(std::move(connections), new_query_str, std::make_shared(Block{}), query_context); + = std::make_shared( + std::move(connections), new_query_str, std::make_shared(Block{}), query_context, + /*throttler=*/nullptr, Scalars{}, Tables{}, QueryProcessingStage::Complete, + /*query_plan=*/nullptr, /*extension=*/std::nullopt, shard_info.pool); QueryPipeline remote_pipeline(std::make_shared( remote_query_executor, false, settings[Setting::async_socket_for_remote], settings[Setting::async_query_sending_for_remote])); remote_pipeline.complete(std::make_shared(remote_query_executor->getSharedHeader())); @@ -1330,7 +1333,8 @@ std::optional StorageDistributed::distributedWriteFromClusterStor Tables{}, QueryProcessingStage::Complete, nullptr, - RemoteQueryExecutor::Extension{.task_iterator = extension.task_iterator, .replica_info = std::move(replica_info)}); + RemoteQueryExecutor::Extension{.task_iterator = 
extension.task_iterator, .replica_info = std::move(replica_info)}, + replicas.pool); Pipe pipe{std::make_shared( remote_query_executor, From 4699ac575453f6efa7a35f023f772755f2cde808 Mon Sep 17 00:00:00 2001 From: strtgbb <146047128+strtgbb@users.noreply.github.com> Date: Tue, 14 Apr 2026 09:51:25 -0400 Subject: [PATCH 09/24] Rebase CICD on v26.3.9.8-lts --- .claude/tools/fetch_ci_report.js | 26 +- .cursor/rules/code-style.mdc | 42 + .cursor/skills/audit-review/SKILL.md | 151 + .../10_project-antalya-bug-report.md | 36 + .github/ISSUE_TEMPLATE/10_question.yaml | 20 - .../ISSUE_TEMPLATE/20_feature-request.yaml | 38 - .../20_project-antalya-feature-request.md | 20 + .../30_project-antalya-question.md | 16 + .../30_unexpected-behaviour.yaml | 55 - .../35_incomplete_implementation.yaml | 50 - .../40_altinity-stable-bug-report.md | 50 + .../ISSUE_TEMPLATE/45_usability-issue.yaml | 48 - .../50_altinity-stable-question.md | 16 + .github/ISSUE_TEMPLATE/50_build-issue.yaml | 50 - .../60_documentation-issue.yaml | 26 - .../ISSUE_TEMPLATE/70_performance-issue.yaml | 48 - .../80_backward-compatibility.yaml | 48 - .github/ISSUE_TEMPLATE/85_bug-report.yaml | 76 - .github/ISSUE_TEMPLATE/90_fuzzing-report.yaml | 26 - .../ISSUE_TEMPLATE/95_sanitizer-report.yaml | 26 - .../96_installation-issues.yaml | 46 - .github/PULL_REQUEST_TEMPLATE.md | 50 +- .github/actionlint.yml | 10 +- .../actions/create_workflow_report/action.yml | 52 + .../ci_run_report.html.jinja | 274 ++ .../create_workflow_report.py | 986 ++++ .../test_report_queries.py | 126 + .../workflow_report_hook.sh | 7 + .github/actions/docker_setup/action.yml | 32 + .github/actions/runner_setup/action.yml | 19 + .github/dco.yml | 17 + .github/grype/parse_vulnerabilities_grype.py | 32 + .github/grype/run_grype_scan.sh | 18 + .../grype/transform_and_upload_results_s3.sh | 20 + .github/retry.sh | 22 + .github/workflows/README.md | 13 + .github/workflows/auto_releases.yml | 99 - .github/workflows/backport_branches.yml | 1272 ----- 
.github/workflows/cancel.yml | 19 + .github/workflows/cherry_pick.yml | 47 - .github/workflows/compare_fails.yml | 110 + .github/workflows/create_release.yml | 357 -- .github/workflows/custom_build_praktika.yml | 55 - .github/workflows/docker_publish.yml | 150 + .github/workflows/grype_scan.yml | 154 + .github/workflows/hourly.yml | 128 - .github/workflows/init_praktika.yml | 29 + .github/workflows/master.yml | 3647 +++++++------- .github/workflows/merge_queue.yml | 279 -- .github/workflows/nightly_coverage.yml | 510 -- .github/workflows/nightly_fuzzers.yml | 244 - .github/workflows/nightly_jepsen.yml | 244 - .github/workflows/nightly_statistics.yml | 91 - .github/workflows/optimize_toolchain.yml | 294 -- .github/workflows/pull_request.yml | 4225 ++++++----------- .github/workflows/pull_request_community.yml | 4012 ++++++++++++++++ .../workflows/regression-reusable-suite.yml | 193 + .github/workflows/regression.yml | 508 ++ .github/workflows/release_builds.yml | 1390 ++++++ .github/workflows/repo-sanity-checks.yml | 150 + .github/workflows/reusable_sign.yml | 166 + .github/workflows/scheduled_runs.yml | 55 + .github/workflows/sign_and_release.yml | 567 +++ ci/defs/defs.py | 211 +- ci/defs/job_configs.py | 211 +- ci/docker/binary-builder/Dockerfile | 11 +- ci/docker/cctools/Dockerfile | 4 +- ci/docker/fasttest/Dockerfile | 6 +- ci/docker/fuzzer/Dockerfile | 6 +- ci/docker/integration/arrowflight/Dockerfile | 2 +- ci/docker/integration/base/Dockerfile | 8 +- .../clickhouse_with_hms_catalog/Dockerfile | 2 +- .../clickhouse_with_unity_catalog/Dockerfile | 2 +- .../integration/helper_container/Dockerfile | 2 +- ci/docker/integration/kerberos_kdc/Dockerfile | 2 +- ci/docker/integration/mysql57/Dockerfile | 2 +- ci/docker/integration/mysql80/Dockerfile | 2 +- .../mysql_dotnet_client/Dockerfile | 2 +- .../mysql_golang_client/Dockerfile | 2 +- .../integration/mysql_java_client/Dockerfile | 2 +- .../integration/mysql_js_client/Dockerfile | 5 +- 
.../integration/mysql_php_client/Dockerfile | 2 +- .../postgresql_java_client/Dockerfile | 2 +- ci/docker/integration/resolver/Dockerfile | 2 +- ci/docker/integration/runner/Dockerfile | 8 +- ci/docker/integration/s3_proxy/Dockerfile | 2 +- ci/docker/keeper-jepsen-test/Dockerfile | 2 +- ci/docker/libfuzzer/Dockerfile | 2 +- ci/docker/performance-comparison/Dockerfile | 6 +- ci/docker/server-jepsen-test/Dockerfile | 2 +- ci/docker/sqlancer-test/Dockerfile | 2 +- ci/docker/stateless-test/Dockerfile | 8 +- ci/docker/stress-test/Dockerfile | 4 +- ci/docker/stress-test/README.md | 2 +- ci/docker/style-test/Dockerfile | 2 +- ci/docker/test-base/Dockerfile | 6 +- ci/jobs/ast_fuzzer_job.py | 2 +- ci/jobs/build_clickhouse.py | 22 +- ci/jobs/clickbench.py | 8 +- ci/jobs/docker_server.py | 54 +- ci/jobs/fast_test.py | 20 +- ci/jobs/functional_tests.py | 88 +- ci/jobs/fuzzers_job.py | 5 +- ci/jobs/install_check.py | 9 +- ci/jobs/integration_test_job.py | 133 +- ci/jobs/llvm_coverage_job.py | 4 +- ci/jobs/scripts/clickhouse_proc.py | 31 +- ci/jobs/scripts/clickhouse_version.py | 15 + ci/jobs/scripts/find_tests.py | 34 +- .../functional_tests/export_coverage.py | 7 +- ci/jobs/scripts/functional_tests_results.py | 18 +- ci/jobs/scripts/fuzzer/run-fuzzer.sh | 7 +- ci/jobs/scripts/integration_tests_configs.py | 45 +- ci/jobs/scripts/workflow_hooks/filter_job.py | 46 +- .../scripts/workflow_hooks/parse_ci_tags.py | 18 + ci/jobs/scripts/workflow_hooks/store_data.py | 6 +- ci/jobs/scripts/workflow_hooks/version_log.py | 35 +- ci/jobs/stress_job.py | 28 +- ci/praktika/_environment.py | 73 +- ci/praktika/cidb.py | 14 +- ci/praktika/execution/__main__.py | 4 + ci/praktika/execution/execution_settings.py | 2 +- ci/praktika/gh.py | 6 +- ci/praktika/hook_cache.py | 2 +- ci/praktika/info.py | 4 + ci/praktika/job.py | 2 +- ci/praktika/native_jobs.py | 37 +- ci/praktika/parser.py | 11 +- ci/praktika/result.py | 5 +- ci/praktika/runner.py | 13 + ci/praktika/s3.py | 41 + ci/praktika/workflow.py 
| 3 + ci/praktika/yaml_additional_templates.py | 168 + ci/praktika/yaml_generator.py | 92 +- ci/settings/altinity_overrides.py | 73 + ci/settings/settings.py | 8 +- ci/workflows/backport_branches.py | 1 + ci/workflows/master.py | 54 +- ci/workflows/merge_queue.py | 4 +- ci/workflows/pull_request.py | 100 +- ci/workflows/pull_request_community.py | 137 + ci/workflows/release_branches.py | 2 +- ci/workflows/release_builds.py | 72 + cmake/autogenerated_versions.txt | 8 +- cmake/version.cmake | 9 +- contrib/jwt-cpp-cmake/CMakeLists.txt | 5 +- docker/keeper/Dockerfile | 16 +- docker/server/Dockerfile.alpine | 13 +- docker/server/Dockerfile.ubuntu | 71 +- docker/server/README.md | 2 +- docker/server/README.src/github-repo | 2 +- docker/server/README.src/license.md | 2 +- docker/server/README.src/logo.svg | 56 +- docker/server/README.src/maintainer.md | 2 +- docker/test/upgrade/Dockerfile | 29 + packages/clickhouse-client.yaml | 6 +- packages/clickhouse-common-static-dbg.yaml | 6 +- packages/clickhouse-common-static.yaml | 6 +- packages/clickhouse-keeper-dbg.yaml | 6 +- packages/clickhouse-keeper.yaml | 6 +- packages/clickhouse-server.yaml | 6 +- programs/server/binary.html | 5 +- programs/server/config.xml | 6 +- programs/server/config.yaml.example | 4 +- programs/server/dashboard.html | 2 +- programs/server/embedded.xml | 6 +- programs/server/index.html | 109 +- programs/server/merges.html | 2 +- programs/server/play.html | 47 +- src/Common/SignalHandlers.cpp | 6 +- tests/broken_tests.yaml | 308 ++ tests/ci/changelog.py | 19 +- tests/ci/ci_buddy.py | 4 +- tests/ci/clickhouse_helper.py | 18 +- tests/ci/create_release.py | 2 +- tests/ci/env_helper.py | 17 +- tests/ci/get_robot_token.py | 14 +- tests/ci/git_helper.py | 44 +- tests/ci/pr_info.py | 26 +- .../packaging/ansible/inventory/localhost.yml | 73 + .../roles/get_cloudfront_info/tasks/main.yml | 34 + .../ansible/roles/publish_pkgs/tasks/main.yml | 98 + .../roles/update_bin_repo/tasks/main.yml | 52 + 
.../roles/update_deb_repo/tasks/main.yml | 61 + .../templates/apt-ftparchive-stable.conf | 6 + .../templates/apt-ftparchive.conf | 17 + .../roles/update_rpm_repo/tasks/main.yml | 51 + .../roles/update_rpm_repo/templates/repo.j2 | 7 + .../update_rpm_repo/templates/rpmmacros.j2 | 1 + .../roles/update_tar_repo/tasks/main.yml | 61 + .../packaging/ansible/sign-and-release.yml | 8 + .../release/packaging/dirindex/dirindexgen.py | 122 + .../packaging/static/bootstrap.bundle.min.js | 7 + tests/ci/s3_helper.py | 41 + tests/ci/sign_release.py | 97 + tests/ci/version_helper.py | 136 +- tests/clickhouse-test | 141 + tests/config/config.d/azure_storage_conf.xml | 8 +- tests/docker_scripts/stress_runner.sh | 2 +- .../compose/docker_compose_arrowflight.yml | 2 +- .../compose/docker_compose_azurite.yml | 2 +- .../compose/docker_compose_clickhouse.yml | 2 +- .../compose/docker_compose_dotnet_client.yml | 2 +- .../docker_compose_iceberg_hms_catalog.yml | 2 +- .../compose/docker_compose_jdbc_bridge.yml | 1 + .../compose/docker_compose_keeper.yml | 6 +- .../docker_compose_kerberized_kafka.yml | 2 +- .../compose/docker_compose_kerberos_kdc.yml | 2 +- .../compose/docker_compose_minio.yml | 6 +- .../compose/docker_compose_mysql.yml | 2 +- .../compose/docker_compose_mysql_8_0.yml | 2 +- .../docker_compose_mysql_dotnet_client.yml | 2 +- .../docker_compose_mysql_golang_client.yml | 2 +- .../docker_compose_mysql_java_client.yml | 2 +- .../docker_compose_mysql_js_client.yml | 2 +- .../docker_compose_mysql_php_client.yml | 2 +- .../compose/docker_compose_nginx.yml | 2 +- .../docker_compose_postgresql_java_client.yml | 2 +- tests/integration/helpers/cluster.py | 18 +- .../test_attach_partition_using_copy/test.py | 4 +- .../test_backward_compatibility/test.py | 2 +- .../test_aggregate_fixed_key.py | 2 +- .../test_aggregate_function_state.py | 4 +- .../test_convert_ordinary.py | 2 +- .../test_cte_distributed.py | 2 +- .../test_functions.py | 2 +- .../test_insert_profile_events.py | 2 +- 
.../test_ip_types_binary_compatibility.py | 2 +- .../test_memory_bound_aggregation.py | 4 +- .../test_normalized_count_comparison.py | 2 +- ...test_old_client_with_replicated_columns.py | 2 +- .../test_rocksdb_upgrade.py | 2 +- .../test_select_aggregate_alias_column.py | 2 +- .../test_short_strings_aggregation.py | 12 +- ...test_vertical_merges_from_compact_parts.py | 2 +- tests/integration/test_cow_policy/test.py | 4 +- tests/integration/test_database_delta/test.py | 2 +- tests/integration/test_database_glue/test.py | 2 +- .../test_disk_over_web_server/test.py | 2 +- .../test.py | 2 +- tests/integration/test_dremio_engine/test.py | 5 +- .../test_lightweight_updates/test.py | 2 +- .../test_polymorphic_parts/test.py | 2 +- .../test.py | 4 +- .../test_replicating_constants/test.py | 4 +- tests/integration/test_storage_s3/test_sts.py | 2 +- .../test_trace_log_build_id/test.py | 2 +- tests/integration/test_ttl_replicated/test.py | 6 +- tests/integration/test_version_update/test.py | 2 +- .../test.py | 6 +- .../queries/0_stateless/01528_play.reference | 2 +- tests/queries/0_stateless/01528_play.sh | 2 +- utils/tests-visualizer/index.html | 2 +- 253 files changed, 15871 insertions(+), 9890 deletions(-) create mode 100644 .cursor/rules/code-style.mdc create mode 100644 .cursor/skills/audit-review/SKILL.md create mode 100644 .github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md delete mode 100644 .github/ISSUE_TEMPLATE/10_question.yaml delete mode 100644 .github/ISSUE_TEMPLATE/20_feature-request.yaml create mode 100644 .github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/30_project-antalya-question.md delete mode 100644 .github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml delete mode 100644 .github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml create mode 100644 .github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md delete mode 100644 .github/ISSUE_TEMPLATE/45_usability-issue.yaml create mode 100644 
.github/ISSUE_TEMPLATE/50_altinity-stable-question.md delete mode 100644 .github/ISSUE_TEMPLATE/50_build-issue.yaml delete mode 100644 .github/ISSUE_TEMPLATE/60_documentation-issue.yaml delete mode 100644 .github/ISSUE_TEMPLATE/70_performance-issue.yaml delete mode 100644 .github/ISSUE_TEMPLATE/80_backward-compatibility.yaml delete mode 100644 .github/ISSUE_TEMPLATE/85_bug-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/90_fuzzing-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/95_sanitizer-report.yaml delete mode 100644 .github/ISSUE_TEMPLATE/96_installation-issues.yaml create mode 100644 .github/actions/create_workflow_report/action.yml create mode 100644 .github/actions/create_workflow_report/ci_run_report.html.jinja create mode 100755 .github/actions/create_workflow_report/create_workflow_report.py create mode 100755 .github/actions/create_workflow_report/test_report_queries.py create mode 100755 .github/actions/create_workflow_report/workflow_report_hook.sh create mode 100644 .github/actions/docker_setup/action.yml create mode 100644 .github/actions/runner_setup/action.yml create mode 100644 .github/dco.yml create mode 100644 .github/grype/parse_vulnerabilities_grype.py create mode 100755 .github/grype/run_grype_scan.sh create mode 100755 .github/grype/transform_and_upload_results_s3.sh create mode 100755 .github/retry.sh create mode 100644 .github/workflows/README.md delete mode 100644 .github/workflows/auto_releases.yml delete mode 100644 .github/workflows/backport_branches.yml create mode 100644 .github/workflows/cancel.yml delete mode 100644 .github/workflows/cherry_pick.yml create mode 100644 .github/workflows/compare_fails.yml delete mode 100644 .github/workflows/create_release.yml delete mode 100644 .github/workflows/custom_build_praktika.yml create mode 100644 .github/workflows/docker_publish.yml create mode 100644 .github/workflows/grype_scan.yml delete mode 100644 .github/workflows/hourly.yml create mode 100644 
.github/workflows/init_praktika.yml delete mode 100644 .github/workflows/merge_queue.yml delete mode 100644 .github/workflows/nightly_coverage.yml delete mode 100644 .github/workflows/nightly_fuzzers.yml delete mode 100644 .github/workflows/nightly_jepsen.yml delete mode 100644 .github/workflows/nightly_statistics.yml delete mode 100644 .github/workflows/optimize_toolchain.yml create mode 100644 .github/workflows/pull_request_community.yml create mode 100644 .github/workflows/regression-reusable-suite.yml create mode 100644 .github/workflows/regression.yml create mode 100644 .github/workflows/release_builds.yml create mode 100644 .github/workflows/repo-sanity-checks.yml create mode 100644 .github/workflows/reusable_sign.yml create mode 100644 .github/workflows/scheduled_runs.yml create mode 100644 .github/workflows/sign_and_release.yml create mode 100644 ci/jobs/scripts/workflow_hooks/parse_ci_tags.py create mode 100644 ci/praktika/yaml_additional_templates.py create mode 100644 ci/settings/altinity_overrides.py create mode 100644 ci/workflows/pull_request_community.py create mode 100644 ci/workflows/release_builds.py create mode 100644 docker/test/upgrade/Dockerfile create mode 100644 tests/broken_tests.yaml create mode 100644 tests/ci/release/packaging/ansible/inventory/localhost.yml create mode 100644 tests/ci/release/packaging/ansible/roles/get_cloudfront_info/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/publish_pkgs/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_bin_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive-stable.conf create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive.conf create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/tasks/main.yml create mode 
100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/repo.j2 create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/rpmmacros.j2 create mode 100644 tests/ci/release/packaging/ansible/roles/update_tar_repo/tasks/main.yml create mode 100644 tests/ci/release/packaging/ansible/sign-and-release.yml create mode 100755 tests/ci/release/packaging/dirindex/dirindexgen.py create mode 100644 tests/ci/release/packaging/static/bootstrap.bundle.min.js create mode 100644 tests/ci/sign_release.py diff --git a/.claude/tools/fetch_ci_report.js b/.claude/tools/fetch_ci_report.js index bf4222b97b49..ef6b843da7a9 100755 --- a/.claude/tools/fetch_ci_report.js +++ b/.claude/tools/fetch_ci_report.js @@ -6,7 +6,7 @@ * node fetch_ci_report.js [options] * * URL formats supported: - * - GitHub PR URLs: https://github.com/ClickHouse/ClickHouse/pull/12345 (fetches ALL CI reports) + * - GitHub PR URLs: https://github.com/Altinity/ClickHouse/pull/12345 (fetches ALL CI reports) * - HTML URLs: https://s3.amazonaws.com/.../json.html?PR=...&sha=...&name_0=... * - Direct JSON URLs: https://s3.amazonaws.com/.../result_*.json * @@ -21,10 +21,10 @@ * --credentials HTTP Basic Auth credentials (comma-separated). Only for ClickHouse_private repository * * Examples: - * node fetch_ci_report.js "https://github.com/ClickHouse/ClickHouse/pull/97171" - * node fetch_ci_report.js "https://github.com/ClickHouse/ClickHouse/pull/97171" --failed --cidb - * node fetch_ci_report.js "https://github.com/ClickHouse/ClickHouse/pull/97171" --report 2 - * node fetch_ci_report.js "https://s3.amazonaws.com/clickhouse-test-reports/json.html?PR=94537&..." 
+ * node fetch_ci_report.js "https://github.com/Altinity/ClickHouse/pull/97171" + * node fetch_ci_report.js "https://github.com/Altinity/ClickHouse/pull/97171" --failed --cidb + * node fetch_ci_report.js "https://github.com/Altinity/ClickHouse/pull/97171" --report 2 + * node fetch_ci_report.js "https://s3.amazonaws.com/altinity-build-artifacts/json.html?PR=94537&..." * node fetch_ci_report.js "https://s3.amazonaws.com/.../result_integration_tests.json" * node fetch_ci_report.js "" --test peak_memory --links * node fetch_ci_report.js "" --failed --download-logs @@ -291,7 +291,7 @@ function extractArtifactLinks(jsonData) { */ async function getCIReportsFromPR(prUrl) { // Parse PR number from URL - const match = prUrl.match(/github\.com\/ClickHouse\/ClickHouse\/pull\/(\d+)/); + const match = prUrl.match(/github\.com\/Altinity\/ClickHouse\/pull\/(\d+)/); if (!match) { throw new Error('Invalid GitHub PR URL format'); } @@ -301,7 +301,7 @@ async function getCIReportsFromPR(prUrl) { // Fetch PR comments to find CI bot comment try { - const commentsJson = execSync(`gh api repos/ClickHouse/ClickHouse/issues/${prNumber}/comments --paginate --jq '.[] | select(.user.login == "clickhouse-gh[bot]") | {body, created_at}'`, { + const commentsJson = execSync(`gh api repos/Altinity/ClickHouse/issues/${prNumber}/comments --paginate --jq '.[] | select(.user.login == "clickhouse-gh[bot]") | {body, created_at}'`, { encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] }); @@ -313,7 +313,7 @@ async function getCIReportsFromPR(prUrl) { } // Search through all bot comments for CI report URLs (not just the latest) - const reportUrlPattern = /https:\/\/s3\.amazonaws\.com\/clickhouse-test-reports\/json\.html\?[^\s)]+/g; + const reportUrlPattern = /https:\/\/s3\.amazonaws\.com\/altinity-build-artifacts\/json\.html\?[^\s)]+/g; for (const comment of comments) { if (!comment.body) continue; const urls = comment.body.match(reportUrlPattern); @@ -654,7 +654,7 @@ async function main() { Usage: node 
fetch_ci_report.js [options] URL formats: - - GitHub PR: https://github.com/ClickHouse/ClickHouse/pull/12345 (fetches ALL CI reports) + - GitHub PR: https://github.com/Altinity/ClickHouse/pull/12345 (fetches ALL CI reports) - CI HTML: https://s3.amazonaws.com/.../json.html?PR=...&sha=...&name_0=... - Direct JSON: https://s3.amazonaws.com/.../result_*.json @@ -669,10 +669,10 @@ Options: --credentials HTTP Basic Auth credentials Examples: - node fetch_ci_report.js "https://github.com/ClickHouse/ClickHouse/pull/97171" - node fetch_ci_report.js "https://github.com/ClickHouse/ClickHouse/pull/97171" --failed --cidb - node fetch_ci_report.js "https://github.com/ClickHouse/ClickHouse/pull/97171" --report 2 - node fetch_ci_report.js "https://s3.amazonaws.com/clickhouse-test-reports/json.html?PR=94537&sha=abc123&name_0=Integration%20tests" + node fetch_ci_report.js "https://github.com/Altinity/ClickHouse/pull/97171" + node fetch_ci_report.js "https://github.com/Altinity/ClickHouse/pull/97171" --failed --cidb + node fetch_ci_report.js "https://github.com/Altinity/ClickHouse/pull/97171" --report 2 + node fetch_ci_report.js "https://s3.amazonaws.com/altinity-build-artifacts/json.html?PR=94537&sha=abc123&name_0=Integration%20tests" node fetch_ci_report.js "" --test peak_memory --links node fetch_ci_report.js "" --failed --download-logs `); diff --git a/.cursor/rules/code-style.mdc b/.cursor/rules/code-style.mdc new file mode 100644 index 000000000000..ea867d1ee062 --- /dev/null +++ b/.cursor/rules/code-style.mdc @@ -0,0 +1,42 @@ +--- +description: Enforces surgical, minimal edit style matching this CI/CD codebase +alwaysApply: true +--- + +# Code Style — Surgical Edits + +## Context + +This is a ClickHouse branch maintained by Altinity. The primary work is CI/CD: GitHub Actions workflows, Python CI scripts, shell scripts, Dockerfiles, YAML configs, and broken-test management across multiple version branches. + +## Core Rules + +1. 
**Minimal diffs.** Change only the lines required by the task. Never touch surrounding code, whitespace, imports, or formatting. + +2. **No unsolicited refactoring.** Do not rename, restructure, extract helpers, or "clean up" code that isn't part of the request. If a change seems to require restructuring, ask first. + +3. **Prefer safe, reversible changes over clean-ups.** An early `return` with a `NOTE` comment is better than deleting dead code that might be needed later. + +4. **Match existing patterns exactly.** When adding entries to structured files (YAML, workflow definitions, job configs), copy the format of neighbouring entries verbatim. + +5. **Breadcrumb comments only.** When removing or bypassing code, leave a short `# NOTE (strtgbb): ` comment. Do not add explanatory comments, docstrings, or type annotations that weren't asked for. + +6. **Mechanical replication.** When the same fix applies to multiple files (e.g. bumping a timeout), apply the identical minimal change to each file. Do not "improve" one of them while you're there. + +7. **Ask before going wide.** If a task would touch more than ~3 files or change the structure of anything, confirm the scope before proceeding. + +8. **Use surrounding logging conventions.** Do not assume `print()` — look at how the surrounding code logs and use the same mechanism (e.g. `logging`, a custom logger, `Shell.check`, `print`, etc.). + +9. **Merge-friendly changes.** Structure edits to minimize git merge conflicts. Prefer appending to lists over inserting in the middle. Avoid reformatting lines adjacent to the change. Keep hunks small and isolated. + +10. **Never edit generated files directly.** Files starting with `# generated by praktika` are generated — do not edit them. Instead, edit the corresponding source template under the `ci/` directory. The user will regenerate the output. + +## Anti-patterns + +- Rewriting a function to be "cleaner" +- Mixing logging styles (e.g. 
adding `logging.info()` where `print()` is used nearby, or vice versa) +- Changing quote styles, whitespace, or import order +- Extracting constants, helpers, or classes that don't exist yet +- Deleting dead code instead of short-circuiting past it +- Editing a `# generated by praktika` file instead of its source template +- Inserting into the middle of a list when appending would work diff --git a/.cursor/skills/audit-review/SKILL.md b/.cursor/skills/audit-review/SKILL.md new file mode 100644 index 000000000000..cf92e91c4a01 --- /dev/null +++ b/.cursor/skills/audit-review/SKILL.md @@ -0,0 +1,151 @@ +--- +name: audit-review +description: Perform deep feature audits with transition-matrix and logical fault-injection validation. Use when reviewing complex changes, regressions, state-machine behavior, config interactions, API/protocol flows, and concurrency-sensitive logic. +--- + +# Audit Review + +## Purpose + +Run a repeatable deep audit for any feature and report confirmed defects with severity. +Default mode is static reasoning unless runtime execution is explicitly performed. + +## Workflow + +1. If PR scope is large, partition by functionality/workstream first: + - define partitions and boundaries, + - review each partition independently with the full workflow below, + - track per-partition findings and coverage, + - deduplicate cross-partition findings by root cause, + - finish with cross-partition interaction risks. +2. Build call graph first: + - user/system entrypoints (API, RPC, CLI, worker, scheduler) + - dispatch and validation layers + - state/storage/cache interactions + - downstream integrations (network, filesystem, service calls) + - exception and error-propagation paths +3. Build transition matrix: + - request/event entry -> processing stages -> state changes -> outputs/side effects + - define key invariants and annotate where each transition must preserve them +4. 
Perform logical testing of all code paths: + - enumerate all reachable branches in changed logic, + - record expected branch outcomes (success, handled failure, fail-open/fail-closed, exception), + - include happy path, malformed input, integration timeout/failure, and concurrency/timing branches. +5. Define logical fault categories from the code under review: + - derive categories from actual components, transitions, and dependencies in scope, + - document category boundary and affected states/transitions, + - prioritize categories by risk and blast radius. +6. Run logical fault injection category-by-category: + - execute one category at a time, + - for each category cover success/failure/edge/concurrency paths as applicable, + - record pass/fail-open/fail-closed/exception behavior per injected fault. + - maintain a category completion matrix with status: + - Executed / Not Applicable / Deferred, + - outcome, + - defects found, + - justification for Not Applicable or Deferred. +7. Confirm each finding with code-path evidence. +8. Produce coverage accounting: + - reviewed vs unreviewed call-graph nodes, + - reviewed vs unreviewed transitions, + - executed vs skipped fault categories (with reasons). + - mark coverage complete only when every in-scope node/transition/category is reviewed or explicitly skipped with justification. +9. For multithreaded/shared-state paths, perform interleaving analysis: + - write several plausible thread interleavings per critical transition, + - identify race/deadlock/lifetime hazards per interleaving. +10. For mutation-heavy paths, perform rollback/partial-update analysis: + - reason about exception/cancellation at intermediate points, + - verify state invariants still hold. 
+ +## C++ Bug-Type Coverage (Required for C++ audits) + +- memory lifetime defects (use-after-free/use-after-move/dangling refs) +- iterator/reference invalidation +- data races and lock-order/deadlock risks +- exception-safety and partial-update rollback hazards +- integer overflow/underflow and signedness conversion bugs +- ownership/resource leaks (RAII violations) +- undefined behavior from invalid casts/aliasing/lifetime misuse + +## Multithreaded Database Emphasis + +For ClickHouse-style multithreaded systems, prioritize these checks before lower-risk issues: + +1. Shared mutable state touched by multiple threads without clear synchronization. +2. Lock hierarchy consistency and potential lock-order inversion/deadlock cycles. +3. Cross-thread lifetime safety (dangling references/pointers after erase/reload/shutdown). +4. Concurrent container mutation + iterator/reference use. +5. Exception/cancellation paths that can leave locks/state inconsistent. + +## Output Contract (Required) + +Always perform the full deep analysis workflow above, but keep the final user-visible report short and limited to: + +1. `Confirmed defects` +2. `Coverage summary` + +```markdown +AI audit note: This review comment was generated by AI (gpt-5.3-codex). + +Audit update for PR # (): + +Confirmed defects: + + : + Impact: + Anchor: / + Trigger: + Why defect: <1-2 lines, behavior not preference> + Fix direction (short): <1 line> + Regression test direction (short): <1 line> + + Medium -> Low> + +Coverage summary: + + Scope reviewed: + Categories failed: + Categories passed: + Assumptions/limits: +``` + +If no confirmed defects: +- output `No confirmed defects in reviewed scope.` +- still include `Coverage summary`. + +### Short-form constraints (required) + +- Keep each defect compact and actionable. +- Include only confirmed defects. +- Use snippets only when needed to prove a defect, or when the user asks. +- Do not include full workflow narrative sections in the report. 
+ +## Severity Rubric + +- High: realistic trigger can cause crash/UB/data corruption/auth bypass/deadlock. +- Medium: correctness/reliability issue with narrower trigger conditions. +- Low: diagnostics/consistency issues without direct correctness break. + +## Checklist + +- Verify call graph is explicitly documented before defect analysis. +- Verify invariants are explicitly listed and checked against transitions. +- Verify fail-open vs fail-closed behavior where security-sensitive. +- Verify logical branch coverage for all changed code paths. +- Verify fault categories are explicitly defined from the reviewed code before injection starts. +- Verify category-by-category execution and reporting completeness. +- Verify full fault-category completion matrix is present and complete. +- Verify concurrency and cache/state transition paths. +- Verify multithreaded interleavings are explicitly analyzed for critical shared-state paths. +- Verify rollback/partial-update safety under exception/cancellation points. +- Verify major C++ bug classes are explicitly covered (or marked not applicable). +- Verify race/deadlock/crash class defects are prioritized and explicitly reported. +- Verify error-contract consistency across equivalent fault paths. +- Verify performance/resource failure classes were considered. +- Verify findings are deduplicated by root cause. +- Verify coverage accounting is present (covered vs skipped with reason). +- Verify stop-condition criteria for coverage completion are explicitly satisfied. +- Verify every confirmed defect includes code evidence snippets. +- Verify parser/config/runtime consistency. +- Verify protocol/API parity across entrypoints. +- Verify no sensitive-data leakage in logs/errors. 
diff --git a/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md new file mode 100644 index 000000000000..0c8c15a05eaf --- /dev/null +++ b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md @@ -0,0 +1,36 @@ +--- +name: Project Antalya Bug Report +about: Help us improve Project Antalya +title: '' +labels: antalya +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Key information** +Provide relevant runtime details. + - Project Antalya Build Version + - Cloud provider, e.g., AWS + - Kubernetes provider, e.g., GKE or Minikube + - Object storage, e.g., AWS S3 or Minio + - Iceberg catalog, e.g., Glue with REST Proxy + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/10_question.yaml b/.github/ISSUE_TEMPLATE/10_question.yaml deleted file mode 100644 index 71a3d3da6425..000000000000 --- a/.github/ISSUE_TEMPLATE/10_question.yaml +++ /dev/null @@ -1,20 +0,0 @@ -name: Question -description: Ask a question about ClickHouse -labels: ["question"] -body: - - type: markdown - attributes: - value: | - > Make sure to check documentation https://clickhouse.com/docs/ first. If the question is concise and probably has a short answer, asking it in [community Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) is probably the fastest way to find the answer. 
For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Question - description: Please put your question here. - validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/20_feature-request.yaml b/.github/ISSUE_TEMPLATE/20_feature-request.yaml deleted file mode 100644 index 054efc2d61ee..000000000000 --- a/.github/ISSUE_TEMPLATE/20_feature-request.yaml +++ /dev/null @@ -1,38 +0,0 @@ -name: Feature request -description: Suggest an idea for ClickHouse -labels: ["feature"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Use case - description: A clear and concise description of what the intended usage scenario is. - validations: - required: true - - type: textarea - attributes: - label: Describe the solution you'd like - description: A clear and concise description of what you want to happen. - validations: - required: true - - type: textarea - attributes: - label: Describe alternatives you've considered - description: A clear and concise description of any alternative solutions or features you've considered. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context or screenshots about the feature request here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md new file mode 100644 index 000000000000..603584bf4428 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md @@ -0,0 +1,20 @@ +--- +name: Project Antalya Feature request +about: Suggest an idea for Project Antalya +title: '' +labels: antalya, enhancement +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/30_project-antalya-question.md b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md new file mode 100644 index 000000000000..c77cee4a916b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md @@ -0,0 +1,16 @@ +--- +name: Project Antalya Question +about: Ask a question about Project Antalya +title: '' +labels: '' +assignees: '' + +--- + +Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first. + +If your question is concise and probably has a short answer, asking it in the [the Altinity Slack workspace](https://altinity.com/slack) is probably the fastest way to find the answer. Use the #antalya channel. + +If you'd rather file a GitHub issue, remove all this text and ask your question here. + +Please include relevant environment information as applicable. 
diff --git a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml b/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml deleted file mode 100644 index 7a34c4bb7ba8..000000000000 --- a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml +++ /dev/null @@ -1,55 +0,0 @@ -name: Unexpected behaviour -description: Some feature is working in non-obvious way -labels: ["unexpected behaviour"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what doesn't work as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml b/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml deleted file mode 100644 index 969c1893e6f5..000000000000 --- a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: Incomplete implementation -description: Implementation of existing feature is not finished -labels: ["unfinished code"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what works not as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md new file mode 100644 index 000000000000..90bf241dc195 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md @@ -0,0 +1,50 @@ +--- +name: Altinity Stable Bug report +about: Report something broken in an Altinity Stable Build +title: '' +labels: stable +assignees: '' + +--- + +✅ *I checked [the Altinity Stable Builds lifecycle table](https://docs.altinity.com/altinitystablebuilds/#altinity-stable-builds-life-cycle-table), and the Altinity Stable Build version I'm using is still supported.* + +## Type of problem +Choose one of the following items, then delete the others: + +**Bug report** - something's broken + +**Incomplete implementation** - something's not quite right + +**Performance issue** - something works, just not as quickly as it should + +**Backwards compatibility issue** - something used to work, but now it doesn't + +**Unexpected behavior** - something surprising happened, but it wasn't the good kind of surprise + +**Installation issue** - something doesn't install the way it should + +**Usability issue** - something works, but it could be a lot easier + +**Documentation issue** - something in the docs is wrong, incomplete, or confusing + +## Describe the situation +A clear, concise description of what's happening. Can you reproduce it in a ClickHouse Official build of the same version? 
+ +## How to reproduce the behavior + +* Which Altinity Stable Build version to use +* Which interface to use, if it matters +* Non-default settings, if any +* `CREATE TABLE` statements for all tables involved +* Sample data for all these tables, use the [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/31fd4f5eb41d5ec26724fc645c11fe4d62eae07f/programs/obfuscator/README.md) if necessary +* Queries to run that lead to an unexpected result + +## Expected behavior +A clear, concise description of what you expected to happen. + +## Logs, error messages, stacktraces, screenshots... +Add any details that might explain the issue. + +## Additional context +Add any other context about the issue here. diff --git a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml b/.github/ISSUE_TEMPLATE/45_usability-issue.yaml deleted file mode 100644 index 0d2ae1a580e5..000000000000 --- a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Usability improvement request -description: Report something can be made more convenient to use -labels: ["usability"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the improvement - description: A clear and concise description of what you want to happen - validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? 
- validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md new file mode 100644 index 000000000000..027970e25a02 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md @@ -0,0 +1,16 @@ +--- +name: Altinity Stable Question +about: Ask a question about an Altinity Stable Build +title: '' +labels: question, stable +assignees: '' + +--- + +Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first. + +If your question is concise and probably has a short answer, asking it in the [the Altinity Slack channel](https://altinity.com/slack) is probably the fastest way to find the answer. + +For more complicated questions, consider [asking them on StackOverflow with the tag "clickhouse"](https://stackoverflow.com/questions/tagged/clickhouse). + +If you'd rather file a GitHub issue, remove all this text and ask your question here. 
diff --git a/.github/ISSUE_TEMPLATE/50_build-issue.yaml b/.github/ISSUE_TEMPLATE/50_build-issue.yaml deleted file mode 100644 index 0549944c0bb2..000000000000 --- a/.github/ISSUE_TEMPLATE/50_build-issue.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: Build issue -description: Report failed ClickHouse build from master -labels: ["build"] -body: - - type: markdown - attributes: - value: | - > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/ - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the problem - description: A clear and concise description of what doesn't work as it is supposed to. - validations: - required: true - - type: textarea - attributes: - label: Operating system - description: OS kind or distribution, specific version/release, non-standard kernel if any. If you are trying to build inside virtual machine, please mention it too. - validations: - required: false - - type: textarea - attributes: - label: CMake version - description: The output of `cmake --version`. - validations: - required: false - - type: textarea - attributes: - label: Ninja version - description: The output of `ninja --version`. - validations: - required: false - - type: textarea - attributes: - label: Compiler name and version - description: We recommend to use clang. The version can be obtained via `clang --version`. - validations: - required: false - - type: textarea - attributes: - label: Full cmake and/or ninja output with the error - description: Please include everything (use https://pastila.nl/ for large output)! 
- validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml b/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml deleted file mode 100644 index bba6df87a783..000000000000 --- a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Documentation issue -description: Report something incorrect or missing in documentation -labels: ["comp-documentation"] -body: - - type: markdown - attributes: - value: | - > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/ - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the issue - description: A clear and concise description of what's wrong in documentation. - validations: - required: true - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml b/.github/ISSUE_TEMPLATE/70_performance-issue.yaml deleted file mode 100644 index 1df99dc76fda..000000000000 --- a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Performance issue -description: Report something working slower than expected -labels: ["performance"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the situation - description: What exactly works slower than expected? 
- validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected performance - description: What are your performance expectation, why do you think they are realistic? Has it been working faster in older ClickHouse releases? Is it working faster in some specific other system? - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml b/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml deleted file mode 100644 index 72f56d781979..000000000000 --- a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Backward compatibility issue -description: Report the case when the behaviour of a new version can break existing use cases -labels: ["backward compatibility"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe the unexpected behaviour - description: A clear and concise description of what works not as it is supposed to. 
- validations: - required: true - - type: textarea - attributes: - label: Which ClickHouse versions are affected? - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: | - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/85_bug-report.yaml b/.github/ISSUE_TEMPLATE/85_bug-report.yaml deleted file mode 100644 index 349bf82a3a4e..000000000000 --- a/.github/ISSUE_TEMPLATE/85_bug-report.yaml +++ /dev/null @@ -1,76 +0,0 @@ -name: Bug report -description: Wrong behavior (visible to users) in the official ClickHouse release. -labels: ["potential bug"] -body: - - type: markdown - attributes: - value: | - > Please make sure that the version you're using is still supported (you can find the list [here](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md#scope-and-supported-versions)). - > You have to provide the following information whenever possible. - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Describe what's wrong - description: | - * A clear and concise description of what works not as it is supposed to. 
- * A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/). - validations: - required: true - - type: dropdown - attributes: - label: Does it reproduce on the most recent release? - description: | - [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv) - options: - - 'Yes' - - 'No' - validations: - required: true - - type: markdown - attributes: - value: | - ----- - > Change "enabled" to true in "send_crash_reports" section in `config.xml`: - ```xml - - - - true - - ``` - ----- - - type: textarea - attributes: - label: How to reproduce - description: | - * Which ClickHouse server version to use - * Which interface to use, if matters - * Non-default settings, if any - * `CREATE TABLE` statements for all tables involved - * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary - * Queries to run that lead to unexpected result - validations: - required: true - - type: textarea - attributes: - label: Expected behavior - description: A clear and concise description of what you expected to happen. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: If applicable, add screenshots to help explain your problem. - validations: - required: false - - type: textarea - attributes: - label: Additional context - description: Add any other context about the problem here. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml b/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml deleted file mode 100644 index 84dc8a372e5a..000000000000 --- a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Assertion found via fuzzing -description: Potential issue has been found via Fuzzer or Stress tests -labels: ["fuzz"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Describe the bug - description: A link to the report. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: Try to reproduce the report and copy the tables and queries involved. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml b/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml deleted file mode 100644 index 7bb47e2b824b..000000000000 --- a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Sanitizer alert -description: Potential issue has been found by special code instrumentation -labels: ["testing"] -body: - - type: markdown - attributes: - value: | - > (you don't have to strictly follow this form) - - type: textarea - attributes: - label: Describe the bug - description: A link to the report. - validations: - required: true - - type: textarea - attributes: - label: How to reproduce - description: Try to reproduce the report and copy the tables and queries involved. - validations: - required: false - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. 
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml b/.github/ISSUE_TEMPLATE/96_installation-issues.yaml deleted file mode 100644 index f71f6079453e..000000000000 --- a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml +++ /dev/null @@ -1,46 +0,0 @@ -name: Installation issue -description: Issue with ClickHouse installation from https://clickhouse.com/docs/en/install/ -labels: ["comp-install"] -body: - - type: markdown - attributes: - value: | - > **I have tried the following solutions**: https://clickhouse.com/docs/en/faq/troubleshooting/#troubleshooting-installation-errors - - type: textarea - attributes: - label: Company or project name - description: Put your company name or project description here. - validations: - required: false - - type: textarea - attributes: - label: Installation type - description: Packages, docker, single binary, curl? - validations: - required: true - - type: textarea - attributes: - label: Source of the ClickHouse - description: A link to the source. Or the command you've tried. - validations: - required: true - - type: textarea - attributes: - label: Describe the problem. - description: What went wrong and what is the expected result? - validations: - required: true - - type: textarea - attributes: - label: Error message and/or stacktrace - description: You can find additional information in server logs. 
- validations: - required: false - - type: textarea - attributes: - label: How to reproduce - description: | - * For Linux-based operating systems: provide a script for clear docker container from the official image - * For anything else: steps to reproduce on as much as possible clear system - validations: - required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6af3162f8ed2..5a43fec377e1 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,24 +12,40 @@ - Not for changelog (changelog entry is not required) -### Changelog entry (a [user-readable short description](https://github.com/ClickHouse/ClickHouse/blob/master/docs/changelog_entry_guidelines.md) of the changes that goes into CHANGELOG.md): +### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md): ... ### Documentation entry for user-facing changes +... -- [ ] Documentation is written (mandatory for new features) - - +### CI/CD Options +#### Exclude tests: +- [ ] Fast test +- [ ] Integration Tests +- [ ] Stateless tests +- [ ] Stateful tests +- [ ] Performance tests +- [ ] All with ASAN +- [x] All with TSAN +- [x] All with MSAN +- [x] All with UBSAN +- [x] All with Coverage +- [ ] All with Aarch64 +- [ ] All Regression +- [ ] Disable CI Cache + +#### Regression jobs to run: +- [ ] Fast suites (mostly <1h) +- [ ] Aggregate Functions (2h) +- [ ] Alter (1.5h) +- [ ] Benchmark (30m) +- [ ] ClickHouse Keeper (1h) +- [x] Iceberg (2h) +- [ ] LDAP (1h) +- [x] Parquet (1.5h) +- [ ] RBAC (1.5h) +- [ ] SSL Server (1h) +- [ ] S3 (2h) +- [x] S3 Export (2h) +- [x] Swarms (30m) +- [ ] Tiered Storage (2h) diff --git a/.github/actionlint.yml b/.github/actionlint.yml index cf5f575e3c74..904a548dadd5 100644 --- a/.github/actionlint.yml +++ b/.github/actionlint.yml @@ -1,9 +1,9 @@ self-hosted-runner: labels: - - builder - - func-tester - - func-tester-aarch64 + - altinity-builder + - altinity-func-tester 
+ - altinity-func-tester-aarch64 - fuzzer-unit-tester - - style-checker - - style-checker-aarch64 + - altinity-style-checker + - altinity-style-checker-aarch64 - release-maker diff --git a/.github/actions/create_workflow_report/action.yml b/.github/actions/create_workflow_report/action.yml new file mode 100644 index 000000000000..5e0f517c35a0 --- /dev/null +++ b/.github/actions/create_workflow_report/action.yml @@ -0,0 +1,52 @@ +name: Create and Upload Combined Report +description: Create and upload a combined CI report +inputs: + workflow_config: + description: "Workflow config" + required: true + final: + description: "Control whether the report is final or a preview" + required: false + default: "false" +runs: + using: "composite" + steps: + - name: Create workflow config + shell: bash + run: | + mkdir -p ./ci/tmp + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ inputs.workflow_config }} + EOF + + - name: Create and upload workflow report + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + ACTIONS_RUN_URL: ${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }} + COMMIT_SHA: ${{ steps.set_version.outputs.commit_sha || github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + FINAL: ${{ inputs.final }} + shell: bash + run: | + pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5 + + CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py" + ARGS="--actions-run-url $ACTIONS_RUN_URL --known-fails tests/broken_tests.yaml --cves --pr-number $PR_NUMBER" + + set +e -x + if [[ "$FINAL" == "false" ]]; then + REPORT_LINK=$($CMD $ARGS --mark-preview) + else + REPORT_LINK=$($CMD $ARGS) + fi + + echo $REPORT_LINK + + if [[ "$FINAL" == "true" ]]; then + IS_VALID_URL=$(echo $REPORT_LINK | grep -E '^https?://') + if [[ -n $IS_VALID_URL ]]; then + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + else + echo "Error: $REPORT_LINK" 
>> $GITHUB_STEP_SUMMARY + exit 1 + fi + fi diff --git a/.github/actions/create_workflow_report/ci_run_report.html.jinja b/.github/actions/create_workflow_report/ci_run_report.html.jinja new file mode 100644 index 000000000000..4c94465a16c6 --- /dev/null +++ b/.github/actions/create_workflow_report/ci_run_report.html.jinja @@ -0,0 +1,274 @@ + + + + + + + + {%- if is_preview %} + + {%- endif %} + + {{ title }} + + + + +

+ +

{{ title }}

+ + + + + + + + + + + + + + + + + + + + + + + +
Pull Request{{ pr_info_html }}
Workflow Run{{ workflow_id }}
Commit{{ commit_sha }}
Build Report{% for job_name, link in build_report_links.items() %}[{{ job_name }}] {% endfor %}
Date {{ date }}
+ {% if is_preview %} +

This is a preview. The workflow is not yet finished.

+ {% endif %} +

Table of Contents

+ + + {%- if pr_number != 0 -%} +

New Fails in PR

+

Compared with base sha {{ base_sha }}

+ {{ new_fails_html }} + {%- endif %} + +

CI Jobs Status

+ {{ ci_jobs_status_html }} + +

Checks Errors

+ {{ checks_errors_html }} + +

Checks New Fails

+ {{ checks_fails_html }} + +

Regression New Fails

+ {{ regression_fails_html }} + +

Docker Images CVEs

+ {{ docker_images_cves_html }} + +

Checks Known Fails

+

+ Fail reason conventions:
+ KNOWN - Accepted fail and fix is not planned
+ INVESTIGATE - We don't know why it fails
+ NEEDSFIX - Investigation done and a fix is needed to make it pass
+

+ {{ checks_known_fails_html }} + + + + \ No newline at end of file diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py new file mode 100755 index 000000000000..adb682be2dcd --- /dev/null +++ b/.github/actions/create_workflow_report/create_workflow_report.py @@ -0,0 +1,986 @@ +#!/usr/bin/env python3 +import argparse +import os +import time +from pathlib import Path +from itertools import combinations +import json +from datetime import datetime +from functools import lru_cache +from glob import glob +import urllib.parse +import re + +import pandas as pd +from jinja2 import Environment, FileSystemLoader +import requests +from clickhouse_driver import Client +from clickhouse_driver.errors import ServerException +import boto3 +from botocore.exceptions import NoCredentialsError +import yaml + + +DATABASE_HOST_VAR = "CHECKS_DATABASE_HOST" +DATABASE_USER_VAR = "CLICKHOUSE_TEST_STAT_LOGIN" +DATABASE_PASSWORD_VAR = "CLICKHOUSE_TEST_STAT_PASSWORD" +S3_BUCKET = "altinity-build-artifacts" +GITHUB_REPO = "Altinity/ClickHouse" +GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") or os.getenv("GH_TOKEN") + +CVE_SEVERITY_ORDER = {"critical": 1, "high": 2, "medium": 3, "low": 4, "negligible": 5} + +def _is_clickhouse_memory_limit_error(exc: BaseException) -> bool: + if isinstance(exc, ServerException) and getattr(exc, "code", None) == 241: + return True + msg = str(exc).lower() + return "memory limit" in msg or "memory_limit" in msg + + +def query_dataframe_with_retry( + client: Client, + query: str, + *, + max_attempts: int = 5, + backoff_seconds: float = 3.0, +) -> pd.DataFrame: + for attempt in range(1, max_attempts + 1): + try: + return client.query_dataframe(query) + except Exception as e: + if not _is_clickhouse_memory_limit_error(e) or attempt >= max_attempts: + raise + wait = backoff_seconds * attempt + time.sleep(wait) + +def get_commit_statuses(sha: str) -> pd.DataFrame: + """ + Fetch commit 
statuses for a given SHA and return as a pandas DataFrame. + Handles pagination to get all statuses. + + Args: + sha (str): Commit SHA to fetch statuses for. + + Returns: + pd.DataFrame: DataFrame containing all statuses. + """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/commits/{sha}/statuses" + + all_data = [] + + while url: + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch statuses: {response.status_code} {response.text}" + ) + + data = response.json() + all_data.extend(data) + + # Check for pagination links in the response headers + if "Link" in response.headers: + links = response.headers["Link"].split(",") + next_url = None + + for link in links: + parts = link.strip().split(";") + if len(parts) == 2 and 'rel="next"' in parts[1]: + next_url = parts[0].strip("<>") + break + + url = next_url + else: + url = None + + # Parse relevant fields + parsed = [ + { + "job_name": item["context"], + "job_status": item["state"], + "message": item["description"], + "results_link": item["target_url"], + } + for item in all_data + ] + + # Create DataFrame + df = pd.DataFrame(parsed) + + # Drop duplicates keeping the first occurrence (newest status for each context) + # GitHub returns statuses in reverse chronological order + df = df.drop_duplicates(subset=["job_name"], keep="first") + + # Sort by status and job name + return df.sort_values( + by=["job_status", "job_name"], ascending=[True, True] + ).reset_index(drop=True) + + +def get_pr_info_from_number(pr_number: str) -> dict: + """ + Fetch pull request information for a given PR number. + + Args: + pr_number (str): Pull request number to fetch information for. + + Returns: + dict: Dictionary containing PR information. 
+ """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/pulls/{pr_number}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch pull request info: {response.status_code} {response.text}" + ) + + return response.json() + + +def get_run_details(run_id: str) -> dict: + """ + Fetch run details for a given run URL. + """ + headers = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github.v3+json", + } + + url = f"https://api.github.com/repos/{GITHUB_REPO}/actions/runs/{run_id}" + response = requests.get(url, headers=headers) + + if response.status_code != 200: + raise Exception( + f"Failed to fetch run details: {response.status_code} {response.text}" + ) + + return response.json() + + +def _checks_latest_test_status_cte(commit_sha: str, branch_name: str) -> str: + """ + Shared filtering for gh-data.checks: anchor time excludes stateless teardown checks + (Stateless% + test_name not matching ^[0-9]{5}); keep rows with check_start_time + >= anchor so main + teardown phases are included. 
+ """ + return f"""WITH checks_with_anchor AS ( + SELECT + check_name, + test_name, + report_url, + check_status, + test_status, + check_start_time, + maxIf( + check_start_time, + NOT (check_name LIKE 'Stateless%' AND NOT match(test_name, '^[0-9]{{5}}')) + ) OVER (PARTITION BY check_name) AS latest_check_start_time + FROM `gh-data`.checks + WHERE commit_sha = '{commit_sha}' AND head_ref = '{branch_name}' + ), + rows_from_latest_check_run AS ( + SELECT + check_name, + test_name, + report_url, + check_status, + test_status, + check_start_time + FROM checks_with_anchor + WHERE check_start_time >= latest_check_start_time + ), + latest_test_status AS ( + SELECT + argMax(check_status, check_start_time) AS job_status, + check_name AS job_name, + argMax(test_status, check_start_time) AS status, + test_name, + report_url AS results_link + FROM rows_from_latest_check_run + GROUP BY check_name, test_name, report_url + )""" + + +def get_checks_fails(client: Client, commit_sha: str, branch_name: str): + """ + Get tests that did not succeed for the given commit and branch. + Exclude checks that have status 'error' as they are counted in get_checks_errors. 
+ """ + query = f"""{_checks_latest_test_status_cte(commit_sha, branch_name)} + SELECT job_status, job_name, status AS test_status, test_name, results_link + FROM latest_test_status + WHERE test_status IN ('FAIL', 'ERROR') + AND job_status != 'error' + ORDER BY job_name, test_name + """ + return query_dataframe_with_retry(client, query) + + +def get_broken_tests_rules(broken_tests_file_path): + with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file: + broken_tests = yaml.safe_load(broken_tests_file) + + compiled_rules = {"exact": {}, "pattern": {}} + + for test in broken_tests: + regex = test.get("regex") is True + rule = { + "reason": test["reason"], + } + + if test.get("check_types"): + rule["check_types"] = test["check_types"] + + if regex: + rule["regex"] = True + compiled_rules["pattern"][re.compile(test["name"])] = rule + else: + compiled_rules["exact"][test["name"]] = rule + + return compiled_rules + + +def get_known_fail_reason(test_name: str, check_name: str, known_fails: dict): + """ + Returns the reason why a test is known to fail based on its name and build context. + + - Exact-name rules are checked first. + - Pattern-name rules are checked next (first match wins). + - Message/not_message conditions are ignored. + """ + # 1. Exact-name rules + rule_data = known_fails["exact"].get(test_name) + if rule_data: + check_types = rule_data.get("check_types", []) + if not check_types or any( + check_type in check_name for check_type in check_types + ): + return rule_data["reason"] + + # 2. 
Pattern-name rules + for name_re, rule_data in known_fails["pattern"].items(): + if name_re.fullmatch(test_name): + check_types = rule_data.get("check_types", []) + if not check_types or any( + check_type in check_name for check_type in check_types + ): + return rule_data["reason"] + + return "No reason given" + + +def get_checks_known_fails( + client: Client, commit_sha: str, branch_name: str, known_fails: dict +): + """ + Get tests that are known to fail for the given commit and branch. + """ + if len(known_fails) == 0: + return pd.DataFrame() + + query = f"""{_checks_latest_test_status_cte(commit_sha, branch_name)} + SELECT job_name, status AS test_status, test_name, results_link + FROM latest_test_status + WHERE status = 'BROKEN' + ORDER BY job_name, test_name + """ + + df = query_dataframe_with_retry(client, query) + + if df.shape[0] == 0: + return df + + df.insert( + len(df.columns) - 1, + "reason", + df.apply( + lambda row: get_known_fail_reason( + row["test_name"], row["job_name"], known_fails + ), + axis=1, + ), + ) + + return df + + +def get_checks_errors(client: Client, commit_sha: str, branch_name: str): + """ + Get checks that have status 'error' for the given commit and branch. 
+ """ + query = f"""{_checks_latest_test_status_cte(commit_sha, branch_name)} + SELECT job_status, job_name, status AS test_status, test_name, results_link + FROM latest_test_status + WHERE job_status = 'error' + ORDER BY job_name, test_name + """ + return query_dataframe_with_retry(client, query) + + +def drop_prefix_rows(df, column_to_clean): + """ + Drop rows from the dataframe if: + - the row matches another row completely except for the specified column + - the specified column of that row is a prefix of the same column in another row + """ + to_drop = set() + reference_columns = [col for col in df.columns if col != column_to_clean] + for (i, row_1), (j, row_2) in combinations(df.iterrows(), 2): + if all(row_1[col] == row_2[col] for col in reference_columns): + if row_2[column_to_clean].startswith(row_1[column_to_clean]): + to_drop.add(i) + elif row_1[column_to_clean].startswith(row_2[column_to_clean]): + to_drop.add(j) + return df.drop(to_drop) + + +def get_regression_fails(client: Client, job_url: str): + """ + Get regression tests that did not succeed for the given job URL. 
+ """ + # If you rename the alias for report_url, also update the formatters in format_results_as_html_table + # Nested SELECT handles test reruns + query = f"""SELECT arch, job_name, status, test_name, results_link + FROM ( + SELECT + architecture as arch, + test_name, + argMax(result, start_time) AS status, + job_name, + report_url as results_link, + job_url + FROM `gh-data`.clickhouse_regression_results + GROUP BY architecture, test_name, job_url, job_name, report_url + ORDER BY length(test_name) DESC + ) + WHERE job_url LIKE '{job_url}%' + AND status IN ('Fail', 'Error') + """ + df = query_dataframe_with_retry(client, query) + df = drop_prefix_rows(df, "test_name") + df["job_name"] = df["job_name"].str.title() + return df + + +def get_new_fails_this_pr( + client: Client, + pr_info: dict, + checks_fails: pd.DataFrame, + regression_fails: pd.DataFrame, +): + """ + Get tests that failed in the PR but passed in the base branch. + Compares both checks and regression test results. + Always includes targeted checks (``targeted`` in job_name). 
+ """ + base_sha = pr_info.get("base", {}).get("sha") + if not base_sha: + raise Exception("No base SHA found for PR") + + # Modify tables to have the same columns + if len(checks_fails) > 0: + checks_fails = checks_fails.copy().drop(columns=["job_status"]) + if len(regression_fails) > 0: + regression_fails = regression_fails.copy() + regression_fails["job_name"] = regression_fails.apply( + lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1 + ) + regression_fails["test_status"] = regression_fails["status"] + + # Combine both types of fails and select only desired columns + desired_columns = ["job_name", "test_name", "test_status", "results_link"] + all_pr_fails = pd.concat([checks_fails, regression_fails], ignore_index=True)[ + desired_columns + ] + if len(all_pr_fails) == 0: + return pd.DataFrame() + + # Get all checks from the base branch that didn't fail + base_checks_query = f"""SELECT job_name, status as test_status, test_name, results_link + FROM ( + SELECT + check_name as job_name, + argMax(test_status, check_start_time) as status, + test_name, + report_url as results_link, + task_url + FROM `gh-data`.checks + WHERE commit_sha='{base_sha}' + GROUP BY check_name, test_name, report_url, task_url + ) + WHERE test_status NOT IN ('FAIL', 'ERROR') + ORDER BY job_name, test_name + """ + base_checks = query_dataframe_with_retry(client, base_checks_query) + + # Get regression results from base branch that didn't fail + base_regression_query = f"""SELECT arch, job_name, status, test_name, results_link + FROM ( + SELECT + architecture as arch, + test_name, + argMax(result, start_time) AS status, + job_url, + job_name, + report_url as results_link + FROM `gh-data`.clickhouse_regression_results + WHERE results_link LIKE'%/{base_sha}/%' + GROUP BY architecture, test_name, job_url, job_name, report_url + ORDER BY length(test_name) DESC + ) + WHERE status NOT IN ('Fail', 'Error') + """ + base_regression = query_dataframe_with_retry(client, base_regression_query) 
+ if len(base_regression) > 0: + base_regression["job_name"] = base_regression.apply( + lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1 + ) + base_regression["test_status"] = base_regression["status"] + base_regression = base_regression.drop(columns=["arch", "status"]) + + # Combine base results + base_results = pd.concat([base_checks, base_regression], ignore_index=True) + + # Find tests that failed in PR but passed in base + pr_failed_tests = set(zip(all_pr_fails["job_name"], all_pr_fails["test_name"])) + base_passed_tests = set(zip(base_results["job_name"], base_results["test_name"])) + + new_fails = pr_failed_tests.intersection(base_passed_tests) + + # Filter PR results to only include new fails + mask = all_pr_fails.apply( + lambda row: (row["job_name"], row["test_name"]) in new_fails + or "targeted" in row["job_name"], + axis=1, + ) + new_fails_df = all_pr_fails[mask] + + return new_fails_df + + +@lru_cache +def get_workflow_config() -> dict: + + # 25.12+ + if os.path.exists("./ci/tmp/workflow_status.json"): + with open("./ci/tmp/workflow_status.json", "r") as f: + data = json.load(f)["config_workflow"]["outputs"]["data"] + assert data is not None, "data is None" + if isinstance(data, str): + data = json.loads(data) + assert ( + "WORKFLOW_CONFIG" in data.keys() + ), f"WORKFLOW_CONFIG not found in data: {data.keys()}" + return data["WORKFLOW_CONFIG"] + + workflow_config_files = glob("./ci/tmp/workflow_config*.json") + if len(workflow_config_files) == 0: + raise Exception("No workflow config file found") + if len(workflow_config_files) > 1: + raise Exception("Multiple workflow config files found") + with open(workflow_config_files[0], "r") as f: + return json.load(f) + + +def get_cached_job(job_name: str) -> dict: + workflow_config = get_workflow_config() + return workflow_config["cache_jobs"].get(job_name, {}) + + +def get_cves(pr_number, commit_sha, branch): + """ + Fetch Grype results from S3. 
+ + If no results are available for download, returns ... (Ellipsis). + """ + s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL")) + prefixes_to_check = set() + + def format_prefix(pr_number, commit_sha, branch): + if pr_number == 0: + return f"REFs/{branch}/{commit_sha}/grype/" + else: + return f"PRs/{pr_number}/{commit_sha}/grype/" + + cached_server_job = get_cached_job("Docker server image") + if cached_server_job: + prefixes_to_check.add( + format_prefix( + cached_server_job["pr_number"], + cached_server_job["sha"], + cached_server_job["branch"], + ) + ) + cached_keeper_job = get_cached_job("Docker keeper image") + if cached_keeper_job: + prefixes_to_check.add( + format_prefix( + cached_keeper_job["pr_number"], + cached_keeper_job["sha"], + cached_keeper_job["branch"], + ) + ) + + if not prefixes_to_check: + prefixes_to_check = {format_prefix(pr_number, commit_sha, branch)} + + grype_result_dirs = [] + for s3_prefix in prefixes_to_check: + try: + response = s3_client.list_objects_v2( + Bucket=S3_BUCKET, Prefix=s3_prefix, Delimiter="/" + ) + grype_result_dirs.extend( + content["Prefix"] + for content in response.get("CommonPrefixes", []) + if isinstance(content, dict) and content.get("Prefix") + ) + except Exception as e: + print(f"Error listing S3 objects at {s3_prefix}: {e}") + continue + + if len(grype_result_dirs) == 0: + # We were asked to check the CVE data, but none was found, + # maybe this is a preview report and grype results are not available yet + return ... 
+ + results = [] + for path in grype_result_dirs: + file_key = f"{path}result.json" + try: + file_response = s3_client.get_object(Bucket=S3_BUCKET, Key=file_key) + content = file_response["Body"].read().decode("utf-8") + results.append(json.loads(content)) + except Exception as e: + print(f"Error getting S3 object at {file_key}: {e}") + continue + + rows = [] + for scan_result in results: + for match in scan_result["matches"]: + rows.append( + { + "docker_image": scan_result["source"]["target"]["userInput"], + "severity": match["vulnerability"]["severity"], + "identifier": match["vulnerability"]["id"], + "namespace": match["vulnerability"]["namespace"], + } + ) + + if len(rows) == 0: + return pd.DataFrame() + + df = pd.DataFrame(rows).drop_duplicates() + + def _cve_sort_key(col): + if col.name == "severity": + return col.str.lower().map(CVE_SEVERITY_ORDER) + return col + + df = df.sort_values(by=["severity", "docker_image"], key=_cve_sort_key) + return df + + +def url_to_html_link(url: str) -> str: + if not url: + return "" + text = url.split("/")[-1].split("?")[0] + if not text: + text = "results" + return f'{text}' + + +def format_test_name_for_linewrap(text: str) -> str: + """Tweak the test name to improve line wrapping.""" + return f'{text}' + + +def format_test_status(text: str) -> str: + """Format the test status for better readability.""" + if text.lower().startswith("fail"): + color = "red" + elif text.lower() == "skipped": + color = "grey" + elif text.lower() in ("success", "ok", "passed", "pass"): + color = "green" + else: + color = "orange" + + return f'{text}' + + +def format_results_as_html_table(results) -> str: + if len(results) == 0: + return "

Nothing to report

" + results.columns = [col.replace("_", " ").title() for col in results.columns] + html = results.to_html( + index=False, + formatters={ + "Results Link": url_to_html_link, + "Test Name": format_test_name_for_linewrap, + "Test Status": format_test_status, + "Job Status": format_test_status, + "Status": format_test_status, + "Message": lambda m: m.replace("\n", " "), + "Identifier": lambda i: url_to_html_link( + "https://nvd.nist.gov/vuln/detail/" + i + ), + "Severity": lambda s: ( + f'{s}' + ), + }, + escape=False, + border=0, + classes=["test-results-table"], + ) + return html + + +def backfill_skipped_statuses( + job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str +): + """ + Fill in the job statuses for skipped jobs. + """ + + if pr_number == 0: + ref_param = f"REF={branch}" + workflow_name = "MasterCI" + else: + ref_param = f"PR={pr_number}" + workflow_name = "PR" + + status_file = f"result_{workflow_name.lower()}.json" + s3_path = f"https://{S3_BUCKET}.s3.amazonaws.com/{ref_param.replace('=', 's/')}/{commit_sha}/{status_file}" + response = requests.get(s3_path) + + if response.status_code != 200: + return job_statuses + + status_data = response.json() + skipped_jobs = [] + for job in status_data["results"]: + if job["status"] == "skipped" and len(job["links"]) > 0: + skipped_jobs.append( + { + "job_name": job["name"], + "job_status": job["status"], + "message": job["info"], + "results_link": job["links"][0], + } + ) + + return pd.concat([job_statuses, pd.DataFrame(skipped_jobs)], ignore_index=True) + + +def get_build_report_links( + job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str +): + """ + Get the build report links for the given PR number, branch, and commit SHA. + + First checks if a build job submitted a success or skipped status. + If not available, it guesses the links. 
+ """ + build_job_names = [ + "Build (amd_release)", + "Build (arm_release)", + "Docker server image", + "Docker keeper image", + ] + build_report_links = {} + + for job in job_statuses.itertuples(): + if ( + job.job_name in build_job_names + and job.job_status + in ( + "success", + "skipped", + ) + and job.results_link + ): + build_report_links[job.job_name] = job.results_link + + if 0 < len(build_report_links) < len(build_job_names): + # Only have some of the build jobs, guess the rest. + # (It was straightforward to force the build jobs to always appear in the cache, + # however doing the same for the docker image jobs is difficult.) + ref_job, ref_link = list(build_report_links.items())[0] + link_template = ref_link.replace( + urllib.parse.quote(ref_job, safe=""), "{job_name}" + ) + for job in build_job_names: + if job not in build_report_links: + build_report_links[job] = link_template.format(job_name=job) + + if len(build_report_links) > 0: + return build_report_links + + # No cache or build result was found, guess the links + if pr_number == 0: + ref_param = f"REF={branch}" + workflow_name = "MasterCI" + else: + ref_param = f"PR={pr_number}" + workflow_name = "PR" + + build_report_link_base = f"https://{S3_BUCKET}.s3.amazonaws.com/json.html?{ref_param}&sha={commit_sha}&name_0={urllib.parse.quote(workflow_name, safe='')}" + build_report_links = { + job_name: f"{build_report_link_base}&name_1={urllib.parse.quote(job_name, safe='')}" + for job_name in build_job_names + } + return build_report_links + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Create a combined CI report.") + parser.add_argument( # Need the full URL rather than just the ID to query the databases + "--actions-run-url", required=True, help="URL of the actions run" + ) + parser.add_argument( + "--pr-number", help="Pull request number for the S3 path", type=int + ) + parser.add_argument("--commit-sha", help="Commit SHA for the S3 path") + 
parser.add_argument( + "--no-upload", action="store_true", help="Do not upload the report" + ) + parser.add_argument( + "--known-fails", type=str, help="Path to the file with known fails" + ) + parser.add_argument( + "--cves", action="store_true", help="Get CVEs from Grype results" + ) + parser.add_argument( + "--mark-preview", action="store_true", help="Mark the report as a preview" + ) + return parser.parse_args() + + +def create_workflow_report( + actions_run_url: str, + pr_number: int = None, + commit_sha: str = None, + no_upload: bool = False, + known_fails_file_path: str = None, + check_cves: bool = False, + mark_preview: bool = False, +) -> str: + + host = os.getenv(DATABASE_HOST_VAR) + if not host: + print(f"{DATABASE_HOST_VAR} is not set") + user = os.getenv(DATABASE_USER_VAR) + if not user: + print(f"{DATABASE_USER_VAR} is not set") + password = os.getenv(DATABASE_PASSWORD_VAR) + if not password: + print(f"{DATABASE_PASSWORD_VAR} is not set") + if not GITHUB_TOKEN: + print("GITHUB_TOKEN is not set") + if not all([host, user, password, GITHUB_TOKEN]): + raise Exception("Required environment variables are not set") + + run_id = actions_run_url.split("/")[-1] + + run_details = get_run_details(run_id) + branch_name = run_details.get("head_branch", "unknown branch") + if pr_number is None or commit_sha is None: + if pr_number is None: + if len(run_details["pull_requests"]) > 0: + pr_number = run_details["pull_requests"][0]["number"] + else: + pr_number = 0 + if commit_sha is None: + commit_sha = run_details["head_commit"]["id"] + + db_client = Client( + host=host, + user=user, + password=password, + port=9440, + secure="y", + verify=False, + settings={"use_numpy": True}, + ) + + fail_results = { + "job_statuses": get_commit_statuses(commit_sha), + "checks_fails": get_checks_fails(db_client, commit_sha, branch_name), + "checks_known_fails": [], + "pr_new_fails": [], + "checks_errors": get_checks_errors(db_client, commit_sha, branch_name), + "regression_fails": 
get_regression_fails(db_client, actions_run_url), + "docker_images_cves": [], + } + + try: + fail_results["docker_images_cves"] = ( + [] if not check_cves else get_cves(pr_number, commit_sha, branch_name) + ) + except Exception as e: + print(f"Error in get_cves: {e}") + + # get_cves returns ... in the case where no Grype result files were found. + # This might occur when run in preview mode. + cves_not_checked = not check_cves or fail_results["docker_images_cves"] is ... + + if known_fails_file_path: + if not os.path.exists(known_fails_file_path): + print(f"WARNING:Known fails file {known_fails_file_path} not found.") + else: + known_fails = get_broken_tests_rules(known_fails_file_path) + + fail_results["checks_known_fails"] = get_checks_known_fails( + db_client, commit_sha, branch_name, known_fails + ) + + if pr_number == 0: + pr_info_html = f"Release ({branch_name})" + else: + try: + pr_info = get_pr_info_from_number(pr_number) + pr_info_html = f""" + #{pr_info.get("number")} ({pr_info.get("base", {}).get('ref')} <- {pr_info.get("head", {}).get('ref')}) {pr_info.get("title")} + """ + fail_results["pr_new_fails"] = get_new_fails_this_pr( + db_client, + pr_info, + fail_results["checks_fails"], + fail_results["regression_fails"], + ) + except Exception as e: + pr_info_html = e + pr_info = {} + + fail_results["job_statuses"] = backfill_skipped_statuses( + fail_results["job_statuses"], pr_number, branch_name, commit_sha + ) + + high_cve_count = 0 + if not cves_not_checked and len(fail_results["docker_images_cves"]) > 0: + high_cve_count = ( + fail_results["docker_images_cves"]["severity"] + .str.lower() + .isin(("high", "critical")) + .sum() + ) + + # Load the template + template = Environment( + loader=FileSystemLoader(os.path.dirname(__file__)) + ).get_template("ci_run_report.html.jinja") + + # Define the context for rendering + context = { + "title": "ClickHouse® CI Workflow Run Report", + "github_repo": GITHUB_REPO, + "s3_bucket": S3_BUCKET, + "pr_info_html": 
pr_info_html, + "pr_number": pr_number, + "workflow_id": run_id, + "commit_sha": commit_sha, + "base_sha": "" if pr_number == 0 else pr_info.get("base", {}).get("sha"), + "date": f"{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC", + "is_preview": mark_preview, + "counts": { + "jobs_status": f"{sum(fail_results['job_statuses']['job_status'].value_counts().get(x, 0) for x in ('failure', 'error'))} fail/error", + "checks_errors": len(fail_results["checks_errors"]), + "checks_new_fails": len(fail_results["checks_fails"]), + "regression_new_fails": len(fail_results["regression_fails"]), + "cves": "N/A" if cves_not_checked else f"{high_cve_count} high/critical", + "checks_known_fails": ( + "N/A" if not known_fails else len(fail_results["checks_known_fails"]) + ), + "pr_new_fails": len(fail_results["pr_new_fails"]), + }, + "build_report_links": get_build_report_links( + fail_results["job_statuses"], pr_number, branch_name, commit_sha + ), + "ci_jobs_status_html": format_results_as_html_table( + fail_results["job_statuses"] + ), + "checks_errors_html": format_results_as_html_table( + fail_results["checks_errors"] + ), + "checks_fails_html": format_results_as_html_table(fail_results["checks_fails"]), + "regression_fails_html": format_results_as_html_table( + fail_results["regression_fails"] + ), + "docker_images_cves_html": ( + "

Not Checked

" + if cves_not_checked + else format_results_as_html_table(fail_results["docker_images_cves"]) + ), + "checks_known_fails_html": ( + "

Not Checked

" + if not known_fails + else format_results_as_html_table(fail_results["checks_known_fails"]) + ), + "new_fails_html": format_results_as_html_table(fail_results["pr_new_fails"]), + } + + # Render the template with the context + rendered_html = template.render(context) + + report_name = "ci_run_report.html" + report_path = Path(report_name) + report_path.write_text(rendered_html, encoding="utf-8") + + if no_upload: + print(f"Report saved to {report_path}") + exit(0) + + if pr_number == 0: + report_destination_key = f"REFs/{branch_name}/{commit_sha}" + else: + report_destination_key = f"PRs/{pr_number}/{commit_sha}" + + report_destination_key += f"/{run_id}/{report_name}" + + # Upload the report to S3 + s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL")) + + try: + s3_client.put_object( + Bucket=S3_BUCKET, + Key=report_destination_key, + Body=rendered_html, + ContentType="text/html; charset=utf-8", + ) + except NoCredentialsError: + print("Credentials not available for S3 upload.") + + return f"https://s3.amazonaws.com/{S3_BUCKET}/" + report_destination_key + + +def main(): + args = parse_args() + + report_url = create_workflow_report( + args.actions_run_url, + args.pr_number, + args.commit_sha, + args.no_upload, + args.known_fails, + args.cves, + args.mark_preview, + ) + + print(report_url) + + +if __name__ == "__main__": + main() diff --git a/.github/actions/create_workflow_report/test_report_queries.py b/.github/actions/create_workflow_report/test_report_queries.py new file mode 100755 index 000000000000..89c7f6d4bb41 --- /dev/null +++ b/.github/actions/create_workflow_report/test_report_queries.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +""" +Test cases for report queries: ``commit_sha``, ``head_ref``, and ``expect``. + +``expect`` is the exact set of ``test_name`` values the query must return (order +ignored). Use an empty ``expect`` tuple when it must return no failures. + +Requires ``testflows`` and ``clickhouse_driver``. 
Run: + + pip install testflows + python .github/actions/create_workflow_report/test_report_queries.py + +Set ``CHECKS_DATABASE_HOST``, ``CLICKHOUSE_TEST_STAT_LOGIN``, +``CLICKHOUSE_TEST_STAT_PASSWORD``. +""" + +import os + +import pandas as pd +from clickhouse_driver import Client +from testflows.core import * + +from create_workflow_report import get_checks_fails + + +def check_result_matches_expect(df: pd.DataFrame, expect: list[str]) -> None: + """Result ``test_name`` values must match ``expect`` exactly (as a set).""" + if "test_name" not in df.columns and not df.empty: + fail(f"DataFrame has no test_name column: {df.columns.tolist()}") + actual = set(df["test_name"].tolist()) if not df.empty else set() + required = set(expect) + if actual != required: + fail(f"test_name mismatch: got {sorted(actual)}; required {sorted(required)}") + + +@TestOutline(Scenario) +@Examples( + "case_id commit_sha head_ref expect", + [ + ( + "tests_passing_no_reruns", + "088fb0351680a67f6f34b4ad56ca12030012c919", + "qa/update-broken-tests", + (), + ), + ( + "stress_startup_failed_no_reruns", + "33fd024890a401d30d85b3fad656f9deba916cbc", + "antalya-25.8", + ( + "Cannot start clickhouse-server", + "Server failed to start (see application_errors.txt and clickhouse-server.clean.log)", + ), + ), + ( + "stateless_and_integration_failed_no_reruns", + "4edabbad8665e1c727bbd7c891e0cbcd81aefd56", + "antalya-25.8", + ( + "test_dns_cache/test.py::test_user_access_ip_change[node5]", + "01042_check_query_and_last_granule_size", + ), + ), + ( + "stateless_fail_in_teardown_no_reruns", + "0cd90a87ab7b2ad83d24df87d96af5d3de7858c2", + "feature/antalya-26.1/json_part2", + ( + "00411_long_accurate_number_comparison_int4", + "03572_export_merge_tree_part_limits_and_table_functions", + "Exception in test runner", + "Some queries hung", + ), + ), + ( + "stateless_and_integration_passed_after_reruns", + "49bb3f7beb5e6e424a1e94c749478fd23a8e6196", + "antalya-25.8", + (), + ), + ( + 
"stress_passed_after_reruns", + "51762a72207f3d4bcee51a0a78912a8b2cbb1bb5", + "antalya-25.8", + (), + ), + ], +) +def test_checks_fails_query(self, case_id, commit_sha, head_ref, expect): + """Test checks fails query for one commit.""" + with Given("DB Client is configured"): + host = os.getenv("CHECKS_DATABASE_HOST") + user = os.getenv("CLICKHOUSE_TEST_STAT_LOGIN") + password = os.getenv("CLICKHOUSE_TEST_STAT_PASSWORD") + if not all([host, user, password]): + skip( + "Set CHECKS_DATABASE_HOST, CLICKHOUSE_TEST_STAT_LOGIN, " + "CLICKHOUSE_TEST_STAT_PASSWORD" + ) + client = Client( + host=host, + user=user, + password=password, + port=9440, + secure="y", + verify=False, + settings={"use_numpy": True}, + ) + + with When( + f"I call get_checks_fails for commit_sha={commit_sha!r} head_ref={head_ref!r}" + ): + df = get_checks_fails(client, commit_sha, head_ref) + + with Then("result test_name set equals expect"): + check_result_matches_expect(df, list(expect)) + + +@Name("test report queries") +@TestModule +def test_report_queries(self): + Scenario(run=test_checks_fails_query, flags=TE) + + +if main(): + test_report_queries() diff --git a/.github/actions/create_workflow_report/workflow_report_hook.sh b/.github/actions/create_workflow_report/workflow_report_hook.sh new file mode 100755 index 000000000000..04a09a9ee3ca --- /dev/null +++ b/.github/actions/create_workflow_report/workflow_report_hook.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# This script is for generating preview reports when invoked as a post-hook from a praktika job +pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5 +ARGS="--mark-preview --known-fails tests/broken_tests.yaml --cves --actions-run-url $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID --pr-number $PR_NUMBER" +CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py" +$CMD $ARGS + diff --git a/.github/actions/docker_setup/action.yml b/.github/actions/docker_setup/action.yml new file mode 
100644 index 000000000000..56f713fa59d1 --- /dev/null +++ b/.github/actions/docker_setup/action.yml @@ -0,0 +1,32 @@ +name: Docker setup +description: Setup docker +inputs: + test_name: + description: name of the test, used in determining ipv6 configs. + default: None + type: string +runs: + using: "composite" + steps: + - name: Docker IPv6 configuration + shell: bash + if: ${{ contains(inputs.test_name, 'Stateless') }} + env: + ipv6_subnet: 2001:3984:3989::/64 + run: | + # make sure docker uses proper IPv6 config + sudo touch /etc/docker/daemon.json + sudo chown ubuntu:ubuntu /etc/docker/daemon.json + sudo cat < /etc/docker/daemon.json + { + "ipv6": true, + "fixed-cidr-v6": "${{ env.ipv6_subnet }}" + } + EOT + sudo chown root:root /etc/docker/daemon.json + sudo systemctl restart docker + sudo systemctl status docker + - name: Docker info + shell: bash + run: | + docker info diff --git a/.github/actions/runner_setup/action.yml b/.github/actions/runner_setup/action.yml new file mode 100644 index 000000000000..5a229fdd47e7 --- /dev/null +++ b/.github/actions/runner_setup/action.yml @@ -0,0 +1,19 @@ +name: Setup +description: Setup environment +runs: + using: "composite" + steps: + - name: Setup zram + shell: bash + run: | + sudo modprobe zram + MemTotal=$(grep -Po "(?<=MemTotal:)\s+\d+" /proc/meminfo) # KiB + Percent=200 + ZRAM_SIZE=$(($MemTotal / 1024 / 1024 * $Percent / 100)) # Convert to GiB + .github/retry.sh 30 2 sudo zramctl --size ${ZRAM_SIZE}GiB --algorithm zstd /dev/zram0 + sudo mkswap /dev/zram0 && sudo swapon -p 100 /dev/zram0 + sudo sysctl vm.swappiness=200 + - name: Install awscli + shell: bash + run: | + .github/retry.sh 10 30 sudo apt-get install -y awscli diff --git a/.github/dco.yml b/.github/dco.yml new file mode 100644 index 000000000000..a0a2aae8f55a --- /dev/null +++ b/.github/dco.yml @@ -0,0 +1,17 @@ +# The configuration file must be named `dco.yml` and placed in the `.github` directory in the default branch of the repository. 
+# +# This configuration file is backwards compatible with the *dcoapp/app* (https://github.com/dcoapp/app) configuration file. + +# https://github.com/cncf/dco2?#remediation-commits +allowRemediationCommits: + # Allow individual remediation commits + # https://github.com/cncf/dco2?#individual + individual: true + # Allow third-party remediation commits + # https://github.com/cncf/dco2?#third-party + thirdParty: false + +require: + # Members are required to sign-off commits + # https://github.com/cncf/dco2?#skipping-sign-off-for-organization-members + members: false diff --git a/.github/grype/parse_vulnerabilities_grype.py b/.github/grype/parse_vulnerabilities_grype.py new file mode 100644 index 000000000000..fec2ef3bfac7 --- /dev/null +++ b/.github/grype/parse_vulnerabilities_grype.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +import json + +from testflows.core import * + +xfails = {} + + +@Name("docker vulnerabilities") +@XFails(xfails) +@TestModule +def docker_vulnerabilities(self): + with Given("I gather grype scan results"): + with open("./result.json", "r") as f: + results = json.load(f) + + for vulnerability in results["matches"]: + with Test( + f"{vulnerability['vulnerability']['id']}@{vulnerability['vulnerability']['namespace']},{vulnerability['vulnerability']['severity']}", + flags=TE, + ): + note(vulnerability) + critical_levels = set(["HIGH", "CRITICAL"]) + if vulnerability['vulnerability']["severity"].upper() in critical_levels: + with Then( + f"Found vulnerability of {vulnerability['vulnerability']['severity']} severity" + ): + result(Fail) + + +if main(): + docker_vulnerabilities() diff --git a/.github/grype/run_grype_scan.sh b/.github/grype/run_grype_scan.sh new file mode 100755 index 000000000000..af428e37d669 --- /dev/null +++ b/.github/grype/run_grype_scan.sh @@ -0,0 +1,18 @@ +set -x +set -e + +IMAGE=$1 + +GRYPE_VERSION=${GRYPE_VERSION:-"v0.92.2"} + +docker pull $IMAGE +docker pull anchore/grype:${GRYPE_VERSION} + +docker run \ + --rm --volume 
/var/run/docker.sock:/var/run/docker.sock \ + --name Grype anchore/grype:${GRYPE_VERSION} \ + --scope all-layers \ + -o json \ + $IMAGE > result.json + +ls -sh diff --git a/.github/grype/transform_and_upload_results_s3.sh b/.github/grype/transform_and_upload_results_s3.sh new file mode 100755 index 000000000000..38674d7a2a26 --- /dev/null +++ b/.github/grype/transform_and_upload_results_s3.sh @@ -0,0 +1,20 @@ +DOCKER_IMAGE=$(echo "$DOCKER_IMAGE" | sed 's/[\/:]/_/g') + +if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/grype/$DOCKER_IMAGE" +else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE" +fi + +S3_PATH="s3://$S3_BUCKET/$PREFIX" +HTTPS_RESULTS_PATH="https://$S3_BUCKET.s3.amazonaws.com/index.html#$PREFIX/" +HTTPS_REPORT_PATH="https://s3.amazonaws.com/$S3_BUCKET/$PREFIX/results.html" +echo "https_report_path=$HTTPS_REPORT_PATH" >> $GITHUB_OUTPUT + +tfs --no-colors transform nice raw.log nice.log.txt +tfs --no-colors report results -a $HTTPS_RESULTS_PATH raw.log - --copyright "Altinity LTD" | tfs --no-colors document convert > results.html + +aws s3 cp --no-progress nice.log.txt $S3_PATH/nice.log.txt --content-type "text/plain; charset=utf-8" || echo "nice log file not found". +aws s3 cp --no-progress results.html $S3_PATH/results.html || echo "results file not found". +aws s3 cp --no-progress raw.log $S3_PATH/raw.log || echo "raw.log file not found". +aws s3 cp --no-progress result.json $S3_PATH/result.json --content-type "text/plain; charset=utf-8" || echo "result.json not found". \ No newline at end of file diff --git a/.github/retry.sh b/.github/retry.sh new file mode 100755 index 000000000000..566c2cf11315 --- /dev/null +++ b/.github/retry.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Execute command until exitcode is 0 or +# maximum number of retries is reached +# Example: +# ./retry +retries=$1 +delay=$2 +command="${@:3}" +exitcode=0 +try=0 +until [ "$try" -ge $retries ] +do + echo "$command" + eval "$command" + exitcode=$? 
+ if [ $exitcode -eq 0 ]; then + break + fi + try=$((try+1)) + sleep $2 +done +exit $exitcode diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 000000000000..56415c2a7478 --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,13 @@ +## Scheduled Build Run Results + +Results for **the latest** release_workflow scheduled runs. + +| Branch | Status | +| ------------ | - | +| **`antalya`** | [![antalya](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=antalya&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aantalya) | +| **`project-antalya-24.12.2`** | [![project-antalya-24.12.2](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=project-antalya-24.12.2&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aproject-antalya-24.12.2) | +| **`customizations/22.8.21`** | [![customizations/22.8.21](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/22.8.21&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/22.8.21) | +| **`customizations/23.3.19`** | [![customizations/23.3.19](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.3.19&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.3.19) | +| **`customizations/23.8.16`** | [![customizations/23.8.16](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/23.8.16&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.8.16) | +| 
**`customizations/24.3.14`** | [![customizations/24.3.14](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.3.14&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.3.14) | +| **`customizations/24.8.11`** | [![customizations/24.8.11](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations/24.8.11&event=workflow_dispatch)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.8.11) | diff --git a/.github/workflows/auto_releases.yml b/.github/workflows/auto_releases.yml deleted file mode 100644 index 3898ddbb8d16..000000000000 --- a/.github/workflows/auto_releases.yml +++ /dev/null @@ -1,99 +0,0 @@ -name: AutoReleases - -env: - PYTHONUNBUFFERED: 1 - -concurrency: - group: autoreleases - -on: - schedule: - - cron: '45 11 * * *' - workflow_dispatch: - inputs: - dry-run: - description: 'Dry run' - required: false - default: false - type: boolean - -jobs: - AutoReleaseInfo: - runs-on: [self-hosted, release-maker] - outputs: - data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }} - dry_run: ${{ steps.info.outputs.DRY_RUN }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - fetch-depth: 0 # full history needed - - name: Debug Info - uses: ./.github/actions/debug - - name: Prepare Info - id: info - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 auto_release.py --prepare - echo "::group::Auto Release Info" - python3 -m json.tool /tmp/autorelease_info.json - echo "::endgroup::" - { - echo 'AUTO_RELEASE_PARAMS<> "$GITHUB_OUTPUT" - if [[ "${{ github.event_name }}" == "schedule" ]]; then - echo "DRY_RUN=false" >> "$GITHUB_OUTPUT" - else - echo "DRY_RUN=${{ github.event.inputs.dry-run 
}}" >> "$GITHUB_OUTPUT" - fi - - name: Post Release Branch statuses - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 auto_release.py --post-status - - name: Clean up - uses: ./.github/actions/clean - - Releases: - needs: AutoReleaseInfo - strategy: - matrix: - release_params: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases }} - max-parallel: 1 - name: Release ${{ matrix.release_params.release_branch }} - uses: ./.github/workflows/create_release.yml - with: - ref: ${{ matrix.release_params.commit_sha }} - type: patch - dry-run: ${{ fromJson(needs.AutoReleaseInfo.outputs.dry_run) }} - secrets: - ROBOT_CLICKHOUSE_COMMIT_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} - - CleanUp: - needs: [Releases] - runs-on: [self-hosted, release-maker] - steps: - - uses: ./.github/actions/clean - with: - images: true - -# PostSlackMessage: -# needs: [Releases] -# runs-on: [self-hosted, release-maker] -# if: ${{ !cancelled() }} -# steps: -# - name: Check out repository code -# uses: ClickHouse/checkout@v1 -# - name: Post -# run: | -# cd "$GITHUB_WORKSPACE/tests/ci" -# python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }} diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml deleted file mode 100644 index cc3b3701555a..000000000000 --- a/.github/workflows/backport_branches.yml +++ /dev/null @@ -1,1272 +0,0 @@ -# generated by praktika - -name: BackportPR - -on: - pull_request: - branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]'] - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }} - DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }} - CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }} - -# Allow updating GH commit statuses and PR comments to post an actual job reports link -permissions: write-all - -jobs: - - config_workflow: - runs-on: 
[self-hosted, style-checker-aarch64] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Config Workflow' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_amd: - runs-on: [self-hosted, style-checker] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} - name: "Dockers Build (amd)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - 
ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (amd)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} - name: "Dockers Build (arm)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (arm)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_amd_debug: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} - name: "Build (amd_debug)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_debug)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_amd_asan: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} - name: "Build (amd_asan)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_asan)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_amd_tsan: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} - name: "Build (amd_tsan)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_tsan)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_amd_release: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} - name: "Build (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_release)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_arm_release: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} - name: "Build (arm_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_release)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_amd_darwin: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} - name: "Build (amd_darwin)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_darwin)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_arm_darwin: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} - name: "Build (arm_darwin)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_darwin)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - docker_server_image: - runs-on: [self-hosted, style-checker] - needs: [build_amd_release, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} - name: "Docker server image" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker server image' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - docker_keeper_image: - runs-on: [self-hosted, style-checker] - needs: [build_amd_release, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} - name: "Docker keeper image" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker keeper image' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - install_packages_amd_release: - runs-on: [self-hosted, style-checker] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} - name: "Install packages (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (amd_release)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - install_packages_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} - name: "Install packages (arm_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (arm_release)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - compatibility_check_amd_release: - runs-on: [self-hosted, style-checker] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} - name: "Compatibility check (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (amd_release)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - compatibility_check_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} - name: "Compatibility check (arm_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (arm_release)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_asan_db_disk_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stress_test_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} - name: "Stress test (amd_tsan)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_tsan)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} - name: "Integration tests (amd_tsan, 1/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} - name: "Integration tests (amd_tsan, 2/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} - name: "Integration tests (amd_tsan, 3/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} - name: "Integration tests (amd_tsan, 4/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} - name: "Integration tests (amd_tsan, 5/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} - name: "Integration tests (amd_tsan, 6/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_amd_asan, build_amd_darwin, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_darwin, build_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, config_workflow, docker_keeper_image, docker_server_image, dockers_build_amd, dockers_build_arm, install_packages_amd_release, install_packages_arm_release, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stress_test_amd_tsan] - if: ${{ always() }} - name: "Finish Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 
'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Finish Workflow' --workflow "BackportPR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml new file mode 100644 index 000000000000..c1e11ef212cd --- /dev/null +++ b/.github/workflows/cancel.yml @@ -0,0 +1,19 @@ +name: Cancel + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + +on: # yamllint disable-line rule:truthy + workflow_run: + workflows: ["PR","PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"] + types: + - requested +jobs: + cancel: + runs-on: ubuntu-latest + steps: + - uses: styfle/cancel-workflow-action@0.9.1 + with: + all_but_latest: true + workflow_id: ${{ github.event.workflow.id }} diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml deleted file mode 100644 index 315673d4abcc..000000000000 --- a/.github/workflows/cherry_pick.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: CherryPick - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - -concurrency: - group: cherry-pick -on: # yamllint disable-line rule:truthy - schedule: - - cron: '0 * * * *' - workflow_dispatch: - -jobs: - CherryPick: - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Set envs - # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/cherry_pick - ROBOT_CLICKHOUSE_SSH_KEY<> $GITHUB_OUTPUT + echo 
"PREVIOUS_TAG_COMMIT=$PREVIOUS_TAG_COMMIT" >> $GITHUB_OUTPUT + echo "UPSTREAM_TAG=$UPSTREAM_TAG" >> $GITHUB_OUTPUT + echo "UPSTREAM_TAG_COMMIT=$UPSTREAM_TAG_COMMIT" >> $GITHUB_OUTPUT + echo "CURRENT_TAG=$CURRENT_TAG" >> $GITHUB_OUTPUT + - name: Comparison report + if: ${{ !cancelled() }} + run: | + git clone https://github.com/Altinity/actions.git + cd actions + git checkout 4623f919ee2738bea69aad405879562476736932 + python3 scripts/compare_ci_fails.py \ + --current-ref ${{ steps.default_refs.outputs.CURRENT_TAG || inputs.current_ref || github.sha }} \ + --previous-ref ${{ steps.default_refs.outputs.PREVIOUS_TAG || inputs.previous_ref || steps.default_refs.outputs.PREVIOUS_TAG_COMMIT }} \ + --upstream-ref ${{ steps.default_refs.outputs.UPSTREAM_TAG || inputs.upstream_ref || steps.default_refs.outputs.UPSTREAM_TAG_COMMIT }} \ + ${{ inputs.include_broken && '--broken' || '' }} + cat comparison_results.md >> $GITHUB_STEP_SUMMARY + + - name: Upload comparison results + uses: actions/upload-artifact@v4 + with: + name: comparison-results + path: | + actions/comparison_results.md diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml deleted file mode 100644 index 6a5e613f5648..000000000000 --- a/.github/workflows/create_release.yml +++ /dev/null @@ -1,357 +0,0 @@ -name: CreateRelease - -concurrency: - group: release - -env: - PYTHONUNBUFFERED: 1 - -'on': - workflow_dispatch: - inputs: - ref: - description: 'Git reference (branch or commit sha) from which to create the release' - required: true - type: string - type: - description: 'The type of release: "new" for a new release or "patch" for a patch release' - required: true - type: choice - options: - - patch - - new - only-repo: - description: 'Run only repos updates including docker (repo-recovery, tests)' - required: false - default: false - type: boolean - only-docker: - description: 'Run only docker builds (repo-recovery, tests)' - required: false - default: false - type: boolean - 
dry-run: - description: 'Dry run' - required: false - default: false - type: boolean - workflow_call: - inputs: - ref: - description: 'Git reference (branch or commit sha) from which to create the release' - required: true - type: string - type: - description: 'The type of release: "new" for a new release or "patch" for a patch release' - required: true - type: string - only-repo: - description: 'Run only repos updates including docker (repo-recovery, tests)' - required: false - default: false - type: boolean - only-docker: - description: 'Run only docker builds (repo-recovery, tests)' - required: false - default: false - type: boolean - dry-run: - description: 'Dry run' - required: false - default: false - type: boolean - secrets: - ROBOT_CLICKHOUSE_COMMIT_TOKEN: - -jobs: - CreateRelease: - env: - GH_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} - runs-on: [self-hosted, release-maker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}} - fetch-depth: 0 - - name: Debug Info - uses: ./.github/actions/debug - - name: Prepare Release Info - shell: bash - run: | - if [ ${{ inputs.only-repo }} == "true" ] || [ ${{ inputs.only-docker }} == "true" ]; then - git tag -l ${{ inputs.ref }} || { echo "With only-repo/docker option ref must be a valid release tag"; exit 1; } - fi - python3 ./tests/ci/create_release.py --prepare-release-info \ - --ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \ - ${{ inputs.dry-run == true && '--dry-run' || '' }} \ - ${{ (inputs.only-repo == true || inputs.only-docker == true) && '--skip-tag-check' || '' }} - echo "::group::Release Info" - python3 -m json.tool /tmp/release_info.json - echo "::endgroup::" - release_tag=$(jq -r '.release_tag' /tmp/release_info.json) - commit_sha=$(jq -r '.commit_sha' /tmp/release_info.json) - is_latest=$(jq -r '.latest' /tmp/release_info.json) - echo "Release Tag: $release_tag" - echo "RELEASE_TAG=$release_tag" >> 
"$GITHUB_ENV" - echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV" - if [ "$is_latest" == "true" ]; then - echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV" - echo "IS_LATEST=1" >> "$GITHUB_ENV" - else - echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV" - echo "IS_LATEST=0" >> "$GITHUB_ENV" - fi - - name: Download All Release Artifacts - if: ${{ inputs.type == 'patch' && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Push Git Tag for the Release - if: ${{ ! inputs.only-repo && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Push New Release Branch - if: ${{ inputs.type == 'new' && ! inputs.only-repo && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Bump CH Version and Update Contributors' List - if: ${{ ! inputs.only-repo && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Bump Docker versions, Changelog, Security - if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! 
inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security" - - # git checkout master # in case WF started from feature branch - echo "List versions" - ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv - echo "Update docker version" - ./utils/list-versions/update-docker-version.sh - echo "Generate ChangeLog" - export CI=1 - docker pull clickhouse/style-test:latest - docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 -e CI=1 --network=host \ - --volume=".:/wd" --workdir="/wd" \ - clickhouse/style-test:latest \ - ./tests/ci/changelog.py -v --debug-helpers \ - --gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \ - --jobs=5 \ - --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} - git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md - echo "Generate Security" - python3 ./utils/security-generator/generate_security.py > SECURITY.md - git diff HEAD - - name: Create ChangeLog PR - if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo && ! inputs.only-docker }} - uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6 - with: - author: "robot-clickhouse " - token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} - committer: "robot-clickhouse " - commit-message: Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} - branch: auto/${{ env.RELEASE_TAG }} - base: master - assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher - delete-branch: true - title: Update version_date.tsv and changelog after ${{ env.RELEASE_TAG }} - labels: do not test - body: | - Update version_date.tsv and changelogs after ${{ env.RELEASE_TAG }} - ### Changelog category (leave one): - - Not for changelog (changelog entry is not required) - - name: Complete previous steps and Restore git state - if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! 
inputs.only-docker }} - shell: bash - run: | - git reset --hard HEAD - git checkout "$GITHUB_REF_NAME" - python3 ./tests/ci/create_release.py --set-progress-completed - - name: Create GH Release - if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! inputs.only-docker}} - shell: bash - run: | - python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Export TGZ Packages - if: ${{ inputs.type == 'patch' && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Test TGZ Packages - if: ${{ inputs.type == 'patch' && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Export RPM Packages - if: ${{ inputs.type == 'patch' && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Test RPM Packages - if: ${{ inputs.type == 'patch' && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Export Debian Packages - if: ${{ inputs.type == 'patch' && ! inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Test Debian Packages - if: ${{ inputs.type == 'patch' && ! 
inputs.only-docker }} - shell: bash - run: | - python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Docker clickhouse/clickhouse-server building - if: ${{ inputs.type == 'patch' && inputs.dry-run != true }} - shell: bash - run: | - cd "./tests/ci" - python3 ./create_release.py --set-progress-started --progress "docker server release" - export DOCKER_IMAGE="clickhouse/clickhouse-server" - - # We must use docker file from the release commit - git checkout "${{ env.RELEASE_TAG }}" - python3 ./version_helper.py --export > /tmp/version.sh - . /tmp/version.sh - - if [[ $CLICKHOUSE_VERSION_STRING =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - echo "ClickHouse version: $CLICKHOUSE_VERSION_STRING" - else - echo "Invalid version string: $CLICKHOUSE_VERSION_STRING" - exit 1 - fi - CLICKHOUSE_VERSION_MINOR=${CLICKHOUSE_VERSION_STRING%.*} - CLICKHOUSE_VERSION_MAJOR=${CLICKHOUSE_VERSION_MINOR%.*} - - # Define build configurations - configs=( - "ubuntu:../../docker/server/Dockerfile.ubuntu" - "alpine:../../docker/server/Dockerfile.alpine" - ) - - for config in "${configs[@]}"; do - # Split the config into variant and Dockerfile path - variant=${config%%:*} - dockerfile=${config##*:} - - VERSION_SUFFIX=$([ "$variant" = "ubuntu" ] && echo "" || echo "-$variant") - LABEL_VERSION="${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}" - TAGS=( - "--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}" - "--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MINOR}${VERSION_SUFFIX}" - "--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MAJOR}${VERSION_SUFFIX}" - ) - - if [ "$IS_LATEST" = "1" ]; then - TAGS+=("--tag=${DOCKER_IMAGE}:latest${VERSION_SUFFIX}") - fi - - echo "Following tags will be created: ${TAGS[*]}" - - # shellcheck disable=SC2086,SC2048 - docker buildx build \ - --platform=linux/amd64,linux/arm64 \ - --provenance=true \ - --sbom=true \ - --output=type=registry \ - --label=com.clickhouse.build.version="$LABEL_VERSION" \ - 
${TAGS[*]} \ - --build-arg=VERSION="$CLICKHOUSE_VERSION_STRING" \ - --progress=plain \ - --file="$dockerfile" \ - ../../docker/server - done - - git checkout - - python3 ./create_release.py --set-progress-completed - - name: Docker clickhouse/clickhouse-keeper building - if: ${{ inputs.type == 'patch' && inputs.dry-run != true }} - shell: bash - run: | - cd "./tests/ci" - python3 ./create_release.py --set-progress-started --progress "docker keeper release" - - export DOCKER_IMAGE="clickhouse/clickhouse-keeper" - - # We must use docker file from the release commit - git checkout "${{ env.RELEASE_TAG }}" - python3 ./version_helper.py --export > /tmp/version.sh - . /tmp/version.sh - - if [[ $CLICKHOUSE_VERSION_STRING =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - echo "ClickHouse version: $CLICKHOUSE_VERSION_STRING" - else - echo "Invalid version string: $CLICKHOUSE_VERSION_STRING" - exit 1 - fi - CLICKHOUSE_VERSION_MINOR=${CLICKHOUSE_VERSION_STRING%.*} - CLICKHOUSE_VERSION_MAJOR=${CLICKHOUSE_VERSION_MINOR%.*} - - # Define build configurations - configs=( - "ubuntu:../../docker/keeper/Dockerfile.ubuntu" - "alpine:../../docker/keeper/Dockerfile.alpine" - ) - - for config in "${configs[@]}"; do - # Split the config into variant and Dockerfile path - variant=${config%%:*} - dockerfile=${config##*:} - - VERSION_SUFFIX=$([ "$variant" = "ubuntu" ] && echo "" || echo "-$variant") - LABEL_VERSION="${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}" - TAGS=( - "--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}" - "--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MINOR}${VERSION_SUFFIX}" - "--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MAJOR}${VERSION_SUFFIX}" - ) - - if [ "$IS_LATEST" = "1" ]; then - TAGS+=("--tag=${DOCKER_IMAGE}:latest${VERSION_SUFFIX}") - fi - - echo "Following tags will be created: ${TAGS[*]}" - - # shellcheck disable=SC2086,SC2048 - docker buildx build \ - --platform=linux/amd64,linux/arm64 \ - --provenance=true \ - --sbom=true \ - 
--output=type=registry \ - --label=com.clickhouse.build.version="$LABEL_VERSION" \ - ${TAGS[*]} \ - --build-arg=VERSION="$CLICKHOUSE_VERSION_STRING" \ - --progress=plain \ - --file="$dockerfile" \ - ../../docker/keeper - done - - git checkout - - python3 ./create_release.py --set-progress-completed - # check out back if previous steps failed - - name: Checkout back - if: ${{ ! cancelled() }} - shell: bash - run: | - git checkout ${{ github.ref }} - - name: Update release info. Merge created PRs - shell: bash - run: | - python3 ./tests/ci/create_release.py --merge-prs ${{ inputs.dry-run == true && '--dry-run' || '' }} - - name: Set current Release progress to Completed with OK - shell: bash - run: | - # dummy stage to finalize release info with "progress: completed; status: OK" - python3 ./tests/ci/create_release.py --set-progress-started --progress "completed" - python3 ./tests/ci/create_release.py --set-progress-completed - - name: Post Slack Message - if: ${{ !cancelled() }} - shell: bash - run: | - python3 ./tests/ci/create_release.py --post-status ${{ inputs.dry-run == true && '--dry-run' || '' }} diff --git a/.github/workflows/custom_build_praktika.yml b/.github/workflows/custom_build_praktika.yml deleted file mode 100644 index f1b823047e68..000000000000 --- a/.github/workflows/custom_build_praktika.yml +++ /dev/null @@ -1,55 +0,0 @@ -# generated by praktika - -name: Build Praktika for PyPI -on: - workflow_dispatch: - inputs: - -env: - PYTHONUNBUFFERED: 1 - CHECKOUT_REF: "" - TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} - -# Allow updating GH commit statuses and PR comments to post an actual job reports link -permissions: write-all - -jobs: - - build_praktika: - runs-on: [self-hosted, arm-large] - needs: [] - name: "Build Praktika" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - -
name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_inputs.json << 'EOF' - ${{ toJson(github.event.inputs) }} - EOF - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build Praktika' --workflow "Build Praktika for PyPI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/docker_publish.yml b/.github/workflows/docker_publish.yml new file mode 100644 index 000000000000..1e59aa8b5b8d --- /dev/null +++ b/.github/workflows/docker_publish.yml @@ -0,0 +1,150 @@ +name: Republish Multiarch Docker Image + +on: + workflow_dispatch: + inputs: + docker_image: + description: 'Multiarch Docker image with tag' + required: true + release_environment: + description: 'Select release type: "staging" or "production"' + type: choice + default: 'staging' + options: + - staging + - production + upload_artifacts: + description: 'Upload artifacts directly in this workflow' + type: boolean + default: true + s3_upload_path: + description: 'Upload artifacts to s3 path' + type: string + required: false + workflow_call: + inputs: + docker_image: + type: string + required: true + release_environment: + type: string + required: false + default: 'staging' + upload_artifacts: + type: boolean + required: false + default: false + s3_upload_path: + type: string + required: false + outputs: + image_archives_path: + description: 'Path to the image archives directory' + value: ${{ 
jobs.republish.outputs.image_archives_path }} + +env: + IMAGE: ${{ github.event.inputs.docker_image || inputs.docker_image }} + +jobs: + republish: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + outputs: + image_archives_path: ${{ steps.set_path.outputs.image_archives_path }} + steps: + - name: Docker Hub Login + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_TOKEN }} + + - name: Set clickhouse-server version as new tag + run: | + # Determine "clickhouse-server" or "clickhouse-keeper" + echo "Input IMAGE: $IMAGE" + COMPONENT=$(echo "$IMAGE" | sed -E 's|.*/(clickhouse-[^:]+):.*|\1|') + echo "Component determined: $COMPONENT" + echo "COMPONENT=$COMPONENT" >> $GITHUB_ENV + + # Pull the image + echo "Pulling the image" + docker pull $IMAGE + + # Get version and clean it up + echo "Getting version from image..." + VERSION_OUTPUT=$(docker run --rm $IMAGE $COMPONENT --version) + echo "Raw version output: $VERSION_OUTPUT" + + # Extract just the version number + NEW_TAG=$(echo "$VERSION_OUTPUT" | sed -E 's/.*version ([0-9.]+[^ ]*).*/\1/') + echo "Cleaned version: $NEW_TAG" + + # Append "-prerelease" if necessary + if [ "${{ github.event.inputs.release_environment || inputs.release_environment }}" = "staging" ]; then + NEW_TAG="${NEW_TAG}-prerelease" + fi + + if [[ "$IMAGE" == *-alpine* ]]; then + NEW_TAG="${NEW_TAG}-alpine" + fi + echo "New tag: $NEW_TAG" + + # Export the new tag + echo "NEW_TAG=$NEW_TAG" >> $GITHUB_ENV + + - name: Process multiarch manifest + run: | + echo "Re-tag multiarch image $IMAGE to altinity/$COMPONENT:$NEW_TAG" + docker buildx imagetools create --tag "altinity/$COMPONENT:$NEW_TAG" "$IMAGE" + + # Create directory for image archives + mkdir -p image_archives + + # Pull and save platform-specific images + for PLATFORM in "linux/amd64" "linux/arm64"; do + echo "Pulling and saving image for $PLATFORM..." 
+ # Pull the specific platform image + docker pull --platform $PLATFORM "altinity/$COMPONENT:$NEW_TAG" + + # Save the image to a tar file + ARCH=$(echo $PLATFORM | cut -d'/' -f2) + docker save "altinity/$COMPONENT:$NEW_TAG" -o "image_archives/${COMPONENT}-${NEW_TAG}-${ARCH}.tar" + done + + # Save manifest inspection + docker buildx imagetools inspect "altinity/$COMPONENT:$NEW_TAG" > image_archives/manifest.txt + + # Compress the archives + cd image_archives + for file in *.tar; do + gzip "$file" + done + cd .. + + - name: Set image archives path + id: set_path + run: | + echo "image_archives_path=${{ github.workspace }}/image_archives" >> $GITHUB_OUTPUT + + - name: Upload image archives + if: ${{ github.event.inputs.upload_artifacts || inputs.upload_artifacts }} + uses: actions/upload-artifact@v4 + with: + name: docker-images-backup + path: image_archives/ + retention-days: 90 + + - name: Install aws cli + if: ${{ inputs.s3_upload_path != '' }} + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + + - name: Upload to S3 + if: ${{ inputs.s3_upload_path != '' }} + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | + aws s3 sync image_archives/ "${{ inputs.s3_upload_path }}" + diff --git a/.github/workflows/grype_scan.yml b/.github/workflows/grype_scan.yml new file mode 100644 index 000000000000..a92fec5f9879 --- /dev/null +++ b/.github/workflows/grype_scan.yml @@ -0,0 +1,154 @@ +name: Grype Scan +run-name: Grype Scan ${{ inputs.docker_image }} + +on: + workflow_dispatch: + # Inputs for manual run + inputs: + docker_image: + description: 'Docker image. If no tag, it will be determined by version_helper.py' + required: true + workflow_call: + # Inputs for workflow call + inputs: + docker_image: + description: 'Docker image. If no tag, it will be determined by version_helper.py' + required: true + type: string + version: + description: 'Version tag. 
If no version, it will be determined by version_helper.py' + required: false + type: string + default: "" + tag-suffix: + description: 'Tag suffix. To be appended the version from version_helper.py' + required: false + type: string + default: "" +env: + PYTHONUNBUFFERED: 1 + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + GRYPE_VERSION: "v0.92.2-arm64v8" + +jobs: + grype_scan: + name: Grype Scan + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker + uses: docker/setup-buildx-action@v3 + + - name: Set up Python + run: | + export TESTFLOWS_VERSION="2.4.19" + sudo apt-get update + sudo apt-get install -y python3-pip python3-venv + python3 -m venv venv + source venv/bin/activate + pip install --upgrade requests chardet urllib3 unidiff boto3 PyGithub + pip install testflows==$TESTFLOWS_VERSION awscli==1.33.28 + echo PATH=$PATH >>$GITHUB_ENV + + - name: Set image tag if not given + if: ${{ !contains(inputs.docker_image, ':') }} + id: set_version + env: + TAG_SUFFIX: ${{ inputs.tag-suffix }} + SPECIFIED_VERSION: ${{ inputs.version }} + run: | + python3 ./tests/ci/version_helper.py | grep = | tee /tmp/version_info + source /tmp/version_info + if [ -z "$SPECIFIED_VERSION" ]; then + VERSION=$CLICKHOUSE_VERSION_STRING + else + VERSION=$SPECIFIED_VERSION + fi + echo "docker_image=${{ inputs.docker_image }}:$PR_NUMBER-$VERSION$TAG_SUFFIX" >> $GITHUB_OUTPUT + + - name: Run Grype Scan + run: | + DOCKER_IMAGE=${{ steps.set_version.outputs.docker_image || inputs.docker_image }} + ./.github/grype/run_grype_scan.sh $DOCKER_IMAGE + + - name: Parse grype results + run: | + python3 -u ./.github/grype/parse_vulnerabilities_grype.py -o nice --no-colors --log raw.log --test-to-end + + - name: Transform and Upload Grype Results + if: always() + id: 
upload_results + env: + S3_BUCKET: "altinity-build-artifacts" + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + PR_NUMBER: ${{ env.PR_NUMBER || github.event.pull_request.number || 0 }} + DOCKER_IMAGE: ${{ steps.set_version.outputs.docker_image || inputs.docker_image }} + run: | + echo "PR_NUMBER=$PR_NUMBER" + ./.github/grype/transform_and_upload_results_s3.sh + + - name: Create step summary + if: always() + id: create_summary + run: | + jq -r '"**Image**: \(.source.target.userInput)"' result.json >> $GITHUB_STEP_SUMMARY + jq -r '.distro | "**Distro**: \(.name):\(.version)"' result.json >> $GITHUB_STEP_SUMMARY + if jq -e '.matches | length == 0' result.json > /dev/null; then + echo "No CVEs" >> $GITHUB_STEP_SUMMARY + else + echo "| Severity | Count |" >> $GITHUB_STEP_SUMMARY + echo "|------------|-------|" >> $GITHUB_STEP_SUMMARY + jq -r ' + .matches | + map(.vulnerability.severity) | + group_by(.) | + map({severity: .[0], count: length}) | + sort_by(.severity) | + map("| \(.severity) | \(.count) |") | + .[] + ' result.json >> $GITHUB_STEP_SUMMARY + fi + + HIGH_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "High")) | length' result.json) + CRITICAL_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. 
== "Critical")) | length' result.json) + TOTAL_HIGH_CRITICAL=$((HIGH_COUNT + CRITICAL_COUNT)) + echo "total_high_critical=$TOTAL_HIGH_CRITICAL" >> $GITHUB_OUTPUT + + if [ $TOTAL_HIGH_CRITICAL -gt 0 ]; then + echo '## High and Critical vulnerabilities found' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + cat raw.log | tfs --no-colors show tests | grep -Pi 'High|Critical' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + fi + + - name: Set commit status + if: always() + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const totalHighCritical = '${{ steps.create_summary.outputs.total_high_critical }}'; + const hasError = totalHighCritical === ''; + const hasVulnerabilities = parseInt(totalHighCritical) > 0; + github.rest.repos.createCommitStatus({ + owner: context.repo.owner, + repo: context.repo.repo, + sha: '${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}', + state: hasError ? 'error' : hasVulnerabilities ? 'failure' : 'success', + target_url: '${{ steps.upload_results.outputs.https_report_path }}', + description: hasError ? 
'An error occurred' : `Grype Scan Completed with ${totalHighCritical} high/critical vulnerabilities`, + context: 'Grype Scan ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}' + }); + + - name: Upload artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: grype-results-${{ hashFiles('raw.log') }} + path: | + result.json + nice.log.txt diff --git a/.github/workflows/hourly.yml b/.github/workflows/hourly.yml deleted file mode 100644 index 615377cb7b0c..000000000000 --- a/.github/workflows/hourly.yml +++ /dev/null @@ -1,128 +0,0 @@ -# generated by praktika - -name: Hourly -on: - schedule: - - cron: 0 */1 * * 1-5 - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }} - -env: - PYTHONUNBUFFERED: 1 - CHECKOUT_REF: "" - - -jobs: - - config_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Config Workflow' --workflow "Hourly" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - collect_flaky_tests: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - name: "Collect flaky tests" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Collect flaky tests' --workflow "Hourly" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - autoassign_approvers: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - name: "Autoassign approvers" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Autoassign approvers' --workflow "Hourly" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/init_praktika.yml b/.github/workflows/init_praktika.yml new file mode 100644 index 000000000000..42aaaeedde00 --- /dev/null +++ b/.github/workflows/init_praktika.yml @@ -0,0 +1,29 @@ +name: InitPraktikaReport + +# This workflow is used to initialize/update the praktika report in S3. +# It does not need to run often, when a new release is created should be plenty. 
+ +on: + workflow_dispatch: + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + +jobs: + + init_praktika: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + name: "Init praktika" + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Init praktika report + run: | + pip install htmlmin + python3 -m ci.praktika infrastructure --deploy --only html + \ No newline at end of file diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 16df894a4e42..5ec979e83230 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -3,21 +3,41 @@ name: MasterCI on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false push: - branches: ['master'] + branches: ['antalya', 'releases/*', 'antalya-*'] env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 + GH_TOKEN: ${{ github.token }} CHECKOUT_REF: "" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + 
-# Allow updating GH commit statuses and PR comments to post an actual job reports link -permissions: write-all jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] name: "Config Workflow" outputs: @@ -29,6 +49,27 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + if: ${{ !failure() && env.AWS_ACCESS_KEY_ID && env.AWS_SECRET_ACCESS_KEY }} + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -54,7 +95,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -67,6 +108,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - 
name: Prepare env script run: | rm -rf ./ci/tmp @@ -92,7 +140,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -105,6 +153,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -130,7 +185,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log dockers_build_multiplatform_manifest: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAobXVsdGlwbGF0Zm9ybSBtYW5pZmVzdCk=') }} name: "Dockers Build (multiplatform manifest)" @@ -143,43 +198,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - 
ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_arm_tidy: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV90aWR5KQ==') }} - name: "Build (arm_tidy)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Dockers Build (multiplatform manifest)" - name: Prepare env script run: | @@ -201,12 +225,12 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_tidy)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_amd_debug: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} name: "Build (amd_debug)" @@ -219,6 +243,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -244,7 +275,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_amd_asan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} name: "Build (amd_asan)" @@ -257,6 +288,13 @@ jobs: with: ref: ${{ 
env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -282,7 +320,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_amd_tsan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} name: "Build (amd_tsan)" @@ -295,6 +333,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -320,7 +365,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_amd_msan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }} name: "Build (amd_msan)" @@ -333,6 +378,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: 
"Build (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -358,7 +410,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_amd_ubsan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }} name: "Build (amd_ubsan)" @@ -371,6 +423,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -396,7 +455,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_amd_binary: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} name: "Build (amd_binary)" @@ -409,6 +468,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -434,7 +500,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + 
" " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_arm_asan: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} name: "Build (arm_asan)" @@ -447,6 +513,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -472,7 +545,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_arm_binary: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }} name: "Build (arm_binary)" @@ -485,6 +558,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -509,11 +589,11 @@ jobs: prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - 
build_llvm_coverage_build: - runs-on: [self-hosted, amd-large] + build_amd_coverage: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGxsdm1fY292ZXJhZ2VfYnVpbGQp') }} - name: "Build (llvm_coverage_build)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9jb3ZlcmFnZSk=') }} + name: "Build (amd_coverage)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -523,6 +603,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_coverage)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -543,12 +630,12 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (llvm_coverage_build)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_coverage)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_amd_release: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} name: "Build (amd_release)" @@ -561,6 +648,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -586,7 +680,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_arm_release: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-builder] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} name: "Build (arm_release)" @@ -599,6 +693,13 @@ jobs: with: ref: ${{ 
env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -623,11 +724,11 @@ jobs: prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_darwin: - runs-on: [self-hosted, amd-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} - name: "Build (amd_darwin)" + unit_tests_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} + name: "Unit tests (asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -637,6 +738,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -657,15 +765,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_darwin: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} - name: "Build (arm_darwin)" + unit_tests_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} + name: "Unit tests (tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -675,6 +783,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -695,15 
+810,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_v80compat: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }} - name: "Build (arm_v80compat)" + unit_tests_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} + name: "Unit tests (msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -713,6 +828,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (msan)" + - name: Prepare env script 
run: | rm -rf ./ci/tmp @@ -733,15 +855,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_freebsd: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }} - name: "Build (amd_freebsd)" + unit_tests_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} + name: "Unit tests (ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -751,6 +873,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests 
(ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -771,15 +900,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_ppc64le: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }} - name: "Build (ppc64le)" + docker_server_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [build_amd_release, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + name: "Docker server image" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -789,6 +918,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: 
./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -809,15 +945,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_compat: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }} - name: "Build (amd_compat)" + docker_keeper_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [build_amd_release, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + name: "Docker keeper image" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -827,6 +963,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: 
./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -847,15 +990,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_musl: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }} - name: "Build (amd_musl)" + install_packages_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} + name: "Install packages (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -865,6 
+1008,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -885,15 +1035,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_riscv64: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }} - name: "Build (riscv64)" + install_packages_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} + name: "Install packages (arm_release)" outputs: data: ${{ 
steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -903,6 +1053,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -923,15 +1080,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_s390x: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }} - name: "Build (s390x)" + compatibility_check_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} + name: "Compatibility check (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -941,6 +1098,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -961,15 +1125,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (amd_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_loongarch64: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }} - name: "Build (loongarch64)" + compatibility_check_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && 
!contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} + name: "Compatibility check (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -979,6 +1143,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -999,15 +1170,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (arm_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_fuzzers: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9mdXp6ZXJzKQ==') }} - name: "Build (arm_fuzzers)" + stateless_tests_amd_asan_distributed_plan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, 
dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1017,6 +1188,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1037,15 +1215,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_fuzzers)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - smoke_test_amd_darwin: - runs-on: [self-hosted, amd_macos_m1] - needs: [build_amd_asan, build_amd_binary, build_amd_darwin, build_amd_debug, build_amd_msan, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U21va2UgdGVzdCAoYW1kX2Rhcndpbik=') }} - name: "Smoke test (amd_darwin)" + stateless_tests_amd_asan_distributed_plan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1055,6 +1233,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1075,15 +1260,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Smoke test (amd_darwin)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - unit_tests_asan: - runs-on: [self-hosted, amd-large] + stateless_tests_amd_asan_db_disk_distributed_plan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} - name: "Unit tests (asan)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1093,6 +1278,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1113,15 
+1305,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - unit_tests_tsan: - runs-on: [self-hosted, amd-large] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} - name: "Unit tests (tsan)" + stateless_tests_amd_debug_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1131,6 +1323,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, parallel)" + - name: Prepare env script run: | 
rm -rf ./ci/tmp @@ -1151,15 +1350,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - unit_tests_msan: - runs-on: [self-hosted, amd-large] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} - name: "Unit tests (msan)" + stateless_tests_amd_debug_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1169,6 +1368,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, sequential)" + - name: Prepare env script 
run: | rm -rf ./ci/tmp @@ -1189,15 +1395,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - unit_tests_ubsan: - runs-on: [self-hosted, amd-large] - needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} - name: "Unit tests (ubsan)" + stateless_tests_amd_tsan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1207,6 +1413,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 1/2)" + - name: 
Prepare env script run: | rm -rf ./ci/tmp @@ -1227,15 +1440,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - unit_tests_amd_llvm_coverage: - runs-on: [self-hosted, amd-large] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYW1kX2xsdm1fY292ZXJhZ2Up') }} - name: "Unit tests (amd_llvm_coverage)" + stateless_tests_amd_tsan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1245,6 +1458,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + 
with: + test_name: "Stateless tests (amd_tsan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1265,15 +1485,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (amd_llvm_coverage)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - docker_server_image: - runs-on: [self-hosted, style-checker] - needs: [build_amd_release, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} - name: "Docker server image" + stateless_tests_amd_tsan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1283,6 +1503,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: 
./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1303,15 +1530,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - docker_keeper_image: - runs-on: [self-hosted, style-checker] - needs: [build_amd_release, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} - name: "Docker keeper image" + stateless_tests_amd_tsan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1321,6 +1548,13 
@@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1341,15 +1575,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - install_packages_amd_release: - runs-on: [self-hosted, style-checker] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} - name: "Install packages (amd_release)" + stateless_tests_amd_msan_wasmedge_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHBhcmFsbGVsLCAxLzIp') }} + name: "Stateless tests (amd_msan, WasmEdge, parallel, 1/2)" outputs: data: ${{ 
steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1359,6 +1593,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, WasmEdge, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1379,15 +1620,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, parallel, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - install_packages_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} - name: "Install packages (arm_release)" + stateless_tests_amd_msan_wasmedge_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHBhcmFsbGVsLCAyLzIp') }} + name: "Stateless tests (amd_msan, WasmEdge, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1397,6 +1638,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, WasmEdge, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1417,1383 +1665,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, parallel, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - compatibility_check_amd_release: - runs-on: [self-hosted, style-checker] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} - name: "Compatibility check (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > 
./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (amd_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - compatibility_check_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} - name: "Compatibility check (arm_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (arm_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_asan_db_disk_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_parallel: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgb2xkIGFuYWx5emVyLCBzMyBzdG9yYWdlLCBEYXRhYmFzZVJlcGxpY2F0ZWQsIFdhc21FZGdlLCBwYXJhbGxlbCk=') }} - name: "Stateless tests (amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgb2xkIGFuYWx5emVyLCBzMyBzdG9yYWdlLCBEYXRhYmFzZVJlcGxpY2F0ZWQsIFdhc21FZGdlLCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgUGFyYWxsZWxSZXBsaWNhcywgczMgc3RvcmFnZSwgcGFyYWxsZWwp') }} - name: "Stateless tests (amd_llvm_coverage, ParallelReplicas, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgUGFyYWxsZWxSZXBsaWNhcywgczMgc3RvcmFnZSwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_llvm_coverage, ParallelReplicas, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgQXN5bmNJbnNlcnQsIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_llvm_coverage, AsyncInsert, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgQXN5bmNJbnNlcnQsIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_llvm_coverage, AsyncInsert, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_debug_parallel: - runs-on: [self-hosted, amd-medium-cpu] - needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_debug_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_debug, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_parallel_1_2: - runs-on: [self-hosted, amd-large] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_tsan, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_parallel_2_2: - runs-on: [self-hosted, amd-large] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_tsan, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_sequential_1_2: - runs-on: [self-hosted, amd-small] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_sequential_2_2: - runs-on: [self-hosted, amd-small] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_msan_wasmedge_parallel_1_2: - runs-on: [self-hosted, amd-large] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHBhcmFsbGVsLCAxLzIp') }} - name: "Stateless tests (amd_msan, WasmEdge, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, parallel, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_msan_wasmedge_parallel_2_2: - runs-on: [self-hosted, amd-large] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHBhcmFsbGVsLCAyLzIp') }} - name: "Stateless tests (amd_msan, WasmEdge, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, parallel, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_msan_wasmedge_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHNlcXVlbnRpYWwsIDEvMik=') }} - name: "Stateless tests (amd_msan, WasmEdge, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, sequential, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_msan_wasmedge_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHNlcXVlbnRpYWwsIDIvMik=') }} - name: "Stateless tests (amd_msan, WasmEdge, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, sequential, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_ubsan_parallel: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_ubsan, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_ubsan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_ubsan, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_s3_storage_parallel_1_2: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_s3_storage_parallel_2_2: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_s3_storage_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_s3_storage_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_arm_binary_parallel: - runs-on: [self-hosted, arm-medium-cpu] - needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} - name: "Stateless tests (arm_binary, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_arm_binary_sequential: - runs-on: [self-hosted, arm-small] - needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (arm_binary, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_1_3: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgMS8zKQ==') }} - name: "Stateless tests (amd_llvm_coverage, 1/3)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, 1/3)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_2_3: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgMi8zKQ==') }} - name: "Stateless tests (amd_llvm_coverage, 2/3)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, 2/3)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_3_3: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgMy8zKQ==') }} - name: "Stateless tests (amd_llvm_coverage, 3/3)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, 3/3)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_arm_asan_azure_parallel: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (arm_asan, azure, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_arm_asan_azure_sequential: - runs-on: [self-hosted, arm-small-mem] - needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (arm_asan, azure, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + stateless_tests_amd_msan_wasmedge_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHNlcXVlbnRpYWwsIDEvMik=') }} + name: "Stateless tests (amd_msan, WasmEdge, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2803,43 +1683,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 
'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Stateless tests (amd_msan, WasmEdge, sequential, 1/2)" - name: Prepare env script run: | @@ -2861,15 +1710,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, sequential, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_asan_db_disk_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" + stateless_tests_amd_msan_wasmedge_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHNlcXVlbnRpYWwsIDIvMik=') }} + name: "Stateless tests (amd_msan, WasmEdge, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2879,43 +1728,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: 
| - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Stateless tests (amd_msan, WasmEdge, sequential, 2/2)" - name: Prepare env script run: | @@ -2937,15 +1755,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, sequential, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_asan_db_disk_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + stateless_tests_amd_ubsan_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_ubsan, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2955,6 +1773,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: 
./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2975,15 +1800,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_asan_db_disk_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + stateless_tests_amd_ubsan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_ubsan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} @@ -2993,6 +1818,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3013,15 +1845,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_binary_1_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} - name: "Integration tests (amd_binary, 1/5)" + stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3031,6 +1863,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3051,15 +1890,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_binary_2_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} - name: "Integration tests (amd_binary, 2/5)" + stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, 
dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3069,6 +1908,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3089,15 +1935,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_binary_3_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} - name: 
"Integration tests (amd_binary, 3/5)" + stateless_tests_amd_tsan_s3_storage_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3107,6 +1953,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3127,15 +1980,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_binary_4_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} - name: "Integration tests (amd_binary, 4/5)" + stateless_tests_amd_tsan_s3_storage_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3145,6 +1998,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: 
+ test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3165,15 +2025,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_binary_5_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} - name: "Integration tests (amd_binary, 5/5)" + stateless_tests_amd_tsan_s3_storage_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3183,6 +2043,13 
@@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3203,15 +2070,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_arm_binary_distributed_plan_1_4: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 1/4)" + stateless_tests_amd_tsan_s3_storage_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3221,6 +2088,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3241,15 +2115,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_arm_binary_distributed_plan_2_4: - runs-on: [self-hosted, arm-medium] + stateless_tests_arm_binary_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 2/4)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && 
!contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} + name: "Stateless tests (arm_binary, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3259,6 +2133,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3279,15 +2160,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_arm_binary_distributed_plan_3_4: - runs-on: [self-hosted, arm-medium] + stateless_tests_arm_binary_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 3/4)" + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (arm_binary, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3297,6 +2178,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3317,15 +2205,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_arm_binary_distributed_plan_4_4: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 4/4)" + stateless_tests_arm_asan_azure_parallel: + runs-on: [self-hosted, 
altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (arm_asan, azure, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3335,6 +2223,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_asan, azure, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3355,15 +2250,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} - name: "Integration tests (amd_tsan, 1/6)" + stateless_tests_arm_asan_azure_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (arm_asan, azure, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3373,6 +2268,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_asan, azure, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3393,15 +2295,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} - name: "Integration tests (amd_tsan, 2/6)" + integration_tests_amd_asan_db_disk_old_analyzer_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3411,6 +2313,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + 
test_name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3431,15 +2340,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} - name: "Integration tests (amd_tsan, 3/6)" + integration_tests_amd_asan_db_disk_old_analyzer_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3449,6 +2358,13 @@ 
jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3469,15 +2385,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} - name: "Integration tests (amd_tsan, 4/6)" + integration_tests_amd_asan_db_disk_old_analyzer_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} + name: "Integration tests (amd_asan, db 
disk, old analyzer, 3/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3487,6 +2403,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3507,15 +2430,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} - name: "Integration tests (amd_tsan, 5/6)" + integration_tests_amd_asan_db_disk_old_analyzer_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3525,6 +2448,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3545,15 +2475,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} - name: "Integration tests (amd_tsan, 6/6)" + integration_tests_amd_asan_db_disk_old_analyzer_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, 
dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3563,6 +2493,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3583,15 +2520,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_msan_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAxLzYp') }} - name: "Integration tests (amd_msan, 1/6)" + 
integration_tests_amd_asan_db_disk_old_analyzer_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3601,6 +2538,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3621,15 +2565,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_msan_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAyLzYp') }} - name: "Integration tests (amd_msan, 2/6)" + integration_tests_amd_binary_1_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} + name: "Integration tests (amd_binary, 1/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3639,6 +2583,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 1/5)" + - name: 
Prepare env script run: | rm -rf ./ci/tmp @@ -3659,15 +2610,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_msan_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAzLzYp') }} - name: "Integration tests (amd_msan, 3/6)" + integration_tests_amd_binary_2_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} + name: "Integration tests (amd_binary, 2/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3677,6 +2628,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + 
test_name: "Integration tests (amd_binary, 2/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3697,15 +2655,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_msan_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA0LzYp') }} - name: "Integration tests (amd_msan, 4/6)" + integration_tests_amd_binary_3_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} + name: "Integration tests (amd_binary, 3/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3715,6 +2673,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: 
Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 3/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3735,15 +2700,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_msan_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA1LzYp') }} - name: "Integration tests (amd_msan, 5/6)" + integration_tests_amd_binary_4_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} + name: "Integration tests (amd_binary, 4/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3753,6 +2718,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - 
name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 4/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3773,15 +2745,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_msan_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA2LzYp') }} - name: "Integration tests (amd_msan, 6/6)" + integration_tests_amd_binary_5_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} + name: "Integration tests (amd_binary, 5/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3791,6 
+2763,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 5/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3811,15 +2790,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_llvm_coverage_1_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCAxLzUp') }} - name: "Integration tests (amd_llvm_coverage, 1/5)" + integration_tests_arm_binary_distributed_plan_1_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} + name: "Integration tests 
(arm_binary, distributed plan, 1/4)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3829,6 +2808,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 1/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3849,15 +2835,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 1/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_llvm_coverage_2_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCAyLzUp') }} - name: "Integration tests (amd_llvm_coverage, 2/5)" + integration_tests_arm_binary_distributed_plan_2_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 
'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 2/4)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3867,6 +2853,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 2/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3887,15 +2880,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 2/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_llvm_coverage_3_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCAzLzUp') }} - name: "Integration tests (amd_llvm_coverage, 3/5)" + integration_tests_arm_binary_distributed_plan_3_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_binary, 
config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 3/4)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3905,6 +2898,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 3/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3925,15 +2925,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 3/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_llvm_coverage_4_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCA0LzUp') }} 
- name: "Integration tests (amd_llvm_coverage, 4/5)" + integration_tests_arm_binary_distributed_plan_4_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 4/4)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3943,6 +2943,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 4/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -3963,15 +2970,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 4/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_llvm_coverage_5_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCA1LzUp') }} - name: "Integration tests (amd_llvm_coverage, 5/5)" + integration_tests_amd_tsan_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} + name: "Integration tests (amd_tsan, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -3981,6 +2988,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration 
tests (amd_tsan, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4001,15 +3015,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 5/5)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_release: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9yZWxlYXNlKQ==') }} - name: "Stress test (amd_release)" + integration_tests_amd_tsan_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} + name: "Integration tests (amd_tsan, 2/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4019,6 +3033,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + 
with: + test_name: "Integration tests (amd_tsan, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4039,15 +3060,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }} - name: "Stress test (amd_debug)" + integration_tests_amd_tsan_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} + name: "Integration tests (amd_tsan, 3/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4057,6 +3078,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: 
./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4077,15 +3105,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_tsan: - runs-on: [self-hosted, amd-medium] + integration_tests_amd_tsan_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} - name: "Stress test (amd_tsan)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} + name: "Integration tests (amd_tsan, 4/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4095,6 +3123,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 4/6)" + - name: Prepare env script run: 
| rm -rf ./ci/tmp @@ -4115,15 +3150,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_arm_asan: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }} - name: "Stress test (arm_asan)" + integration_tests_amd_tsan_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} + name: "Integration tests (amd_tsan, 5/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4133,6 +3168,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 5/6)" + - name: Prepare env script run: | 
rm -rf ./ci/tmp @@ -4153,15 +3195,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_arm_asan_s3: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }} - name: "Stress test (arm_asan, s3)" + integration_tests_amd_tsan_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} + name: "Integration tests (amd_tsan, 6/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4171,6 +3213,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 6/6)" + - name: Prepare env 
script run: | rm -rf ./ci/tmp @@ -4191,15 +3240,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }} - name: "Stress test (amd_ubsan)" + integration_tests_amd_msan_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAxLzYp') }} + name: "Integration tests (amd_msan, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4209,6 +3258,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 1/6)" + - name: 
Prepare env script run: | rm -rf ./ci/tmp @@ -4229,15 +3285,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_msan: - runs-on: [self-hosted, amd-medium] + integration_tests_amd_msan_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }} - name: "Stress test (amd_msan)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAyLzYp') }} + name: "Integration tests (amd_msan, 2/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4247,6 +3303,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4267,15 +3330,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_azure_amd_msan: - runs-on: [self-hosted, amd-medium] + integration_tests_amd_msan_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBhbWRfbXNhbik=') }} - name: "Stress test (azure, amd_msan)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAzLzYp') }} + name: "Integration tests (amd_msan, 3/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4285,6 +3348,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4305,15 +3375,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (azure, amd_msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_azure_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBhbWRfdHNhbik=') }} - name: "Stress test (azure, amd_tsan)" + integration_tests_amd_msan_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA0LzYp') }} + name: "Integration tests (amd_msan, 4/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4323,6 +3393,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4343,15 
+3420,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (azure, amd_tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }} - name: "AST fuzzer (amd_debug)" + integration_tests_amd_msan_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA1LzYp') }} + name: "Integration tests (amd_msan, 5/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4361,6 +3438,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ 
-4381,15 +3465,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_arm_asan: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }} - name: "AST fuzzer (arm_asan)" + integration_tests_amd_msan_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA2LzYp') }} + name: "Integration tests (amd_msan, 6/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4399,6 +3483,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ 
-4419,15 +3510,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }} - name: "AST fuzzer (amd_tsan)" + stress_test_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9yZWxlYXNlKQ==') }} + name: "Stress test (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4437,6 +3528,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4457,15 +3555,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }} - name: "AST fuzzer (amd_msan)" + stress_test_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }} + name: "Stress test (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4475,6 +3573,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4495,15 +3600,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }} - name: "AST fuzzer (amd_ubsan)" + stress_test_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} + name: "Stress test (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4513,6 +3618,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4533,15 +3645,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }} - name: "BuzzHouse (amd_debug)" + stress_test_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }} + name: "Stress test (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4551,6 +3663,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4571,15 +3690,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_arm_asan: - runs-on: [self-hosted, arm-medium] + stress_test_arm_asan_s3: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }} - name: "BuzzHouse (arm_asan)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }} + name: "Stress test (arm_asan, s3)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4589,6 +3708,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (arm_asan, s3)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4609,15 +3735,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }} - name: "BuzzHouse (amd_tsan)" + stress_test_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }} + name: "Stress test (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4627,6 +3753,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4647,15 +3780,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_amd_msan: - runs-on: [self-hosted, amd-medium] + stress_test_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }} - name: "BuzzHouse (amd_msan)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }} + name: "Stress test (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4665,6 +3798,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4685,15 +3825,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }} - name: "BuzzHouse (amd_ubsan)" + stress_test_azure_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBhbWRfbXNhbik=') }} + name: "Stress test (azure, amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4703,6 +3843,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (azure, amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4723,15 +3870,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (azure, amd_msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzYp') }} - name: "Performance Comparison (amd_release, master_head, 1/6)" + stress_test_azure_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBhbWRfdHNhbik=') }} + name: "Stress test (azure, amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4741,6 +3888,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (azure, amd_tsan)" + - 
name: Prepare env script run: | rm -rf ./ci/tmp @@ -4761,15 +3915,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (azure, amd_tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzYp') }} - name: "Performance Comparison (amd_release, master_head, 2/6)" + ast_fuzzer_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }} + name: "AST fuzzer (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4779,6 +3933,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup 
+ uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4799,15 +3960,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzYp') }} - name: "Performance Comparison (amd_release, master_head, 3/6)" + ast_fuzzer_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }} + name: "AST fuzzer (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4817,6 +3978,13 @@ jobs: with: ref: ${{ 
env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4837,15 +4005,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA0LzYp') }} - name: "Performance Comparison (amd_release, master_head, 4/6)" + ast_fuzzer_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }} + name: "AST fuzzer (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} @@ -4855,6 +4023,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4875,15 +4050,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA1LzYp') }} - name: "Performance Comparison (amd_release, master_head, 5/6)" + ast_fuzzer_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }} + name: "AST fuzzer 
(amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4893,6 +4068,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4913,15 +4095,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA2LzYp') }} - name: "Performance Comparison (amd_release, master_head, 6/6)" + ast_fuzzer_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }} + name: "AST fuzzer (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4931,6 +4113,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4951,15 +4140,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_1_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzYp') }} - name: "Performance Comparison (arm_release, master_head, 1/6)" + buzzhouse_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }} + name: "BuzzHouse (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4969,6 +4158,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4989,15 +4185,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_2_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzYp') }} - name: "Performance Comparison (arm_release, master_head, 2/6)" + buzzhouse_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_asan, config_workflow, dockers_build_amd, 
dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }} + name: "BuzzHouse (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5007,6 +4203,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5027,15 +4230,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_3_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzYp') }} - name: "Performance Comparison (arm_release, master_head, 3/6)" + buzzhouse_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, 
altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }} + name: "BuzzHouse (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5045,6 +4248,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5065,15 +4275,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_4_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA0LzYp') }} - name: "Performance Comparison (arm_release, master_head, 
4/6)" + buzzhouse_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }} + name: "BuzzHouse (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5083,6 +4293,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5103,15 +4320,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_5_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA1LzYp') }} - name: "Performance Comparison (arm_release, master_head, 5/6)" + buzzhouse_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }} + name: "BuzzHouse (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5121,6 +4338,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5141,15 +4365,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_6_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA2LzYp') }} - name: "Performance Comparison (arm_release, master_head, 6/6)" + clickbench_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q2xpY2tCZW5jaCAoYW1kX3JlbGVhc2Up') }} + name: "ClickBench (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5159,6 +4383,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "ClickBench (amd_release)" + 
- name: Prepare env script run: | rm -rf ./ci/tmp @@ -5179,15 +4410,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'ClickBench (amd_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_release_base_1_6: - runs-on: [self-hosted, arm-medium] + clickbench_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMS82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 1/6)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q2xpY2tCZW5jaCAoYXJtX3JlbGVhc2Up') }} + name: "ClickBench (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5197,6 +4428,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "ClickBench (arm_release)" + - name: Prepare 
env script run: | rm -rf ./ci/tmp @@ -5217,15 +4455,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, release_base, 1/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_release_base_2_6: - runs-on: [self-hosted, arm-medium] + sqltest: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMi82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 2/6)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U1FMVGVzdA==') }} + name: "SQLTest" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5235,6 +4473,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "SQLTest" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5255,15 +4500,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, release_base, 2/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'SQLTest' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_release_base_3_6: - runs-on: [self-hosted, arm-medium] + sqllogic_test: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMy82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 3/6)" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U1FMTG9naWMgdGVzdA==') }} + name: "SQLLogic test" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5273,6 +4518,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "SQLLogic test" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5293,15 +4545,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, release_base, 3/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'SQLLogic test' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_release_base_4_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgNC82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 4/6)" + stateless_tests_amd_coverage_1_8: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDEvOCk=') }} + name: "Stateless tests (amd_coverage, 1/8)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5311,6 +4563,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + 
test_name: "Stateless tests (amd_coverage, 1/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5331,15 +4590,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, release_base, 4/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 1/8)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_release_base_5_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgNS82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 5/6)" + stateless_tests_amd_coverage_2_8: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDIvOCk=') }} + name: "Stateless tests (amd_coverage, 2/8)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5349,6 +4608,13 @@ jobs: 
with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 2/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5369,15 +4635,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, release_base, 5/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 2/8)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_release_base_6_6: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgNi82KQ==') }} - name: "Performance Comparison (arm_release, release_base, 6/6)" + stateless_tests_amd_coverage_3_8: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDMvOCk=') }} + name: "Stateless tests 
(amd_coverage, 3/8)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5387,6 +4653,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 3/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5407,15 +4680,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, release_base, 6/6)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 3/8)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - clickbench_amd_release: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q2xpY2tCZW5jaCAoYW1kX3JlbGVhc2Up') }} - name: "ClickBench (amd_release)" + stateless_tests_amd_coverage_4_8: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDQvOCk=') }} + name: "Stateless tests (amd_coverage, 4/8)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5425,6 +4698,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 4/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5445,15 +4725,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'ClickBench (amd_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 4/8)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - clickbench_arm_release: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q2xpY2tCZW5jaCAoYXJtX3JlbGVhc2Up') }} - name: "ClickBench (arm_release)" + stateless_tests_amd_coverage_5_8: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDUvOCk=') }} + name: "Stateless tests (amd_coverage, 5/8)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5463,6 +4743,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 5/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5483,15 +4770,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 5/8)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - sqltest: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U1FMVGVzdA==') }} - name: "SQLTest" + stateless_tests_amd_coverage_6_8: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDYvOCk=') }} + name: "Stateless tests (amd_coverage, 6/8)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5501,6 +4788,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 6/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5521,15 +4815,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'SQLTest' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 6/8)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - sqllogic_test: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U1FMTG9naWMgdGVzdA==') }} - name: "SQLLogic test" + stateless_tests_amd_coverage_7_8: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDcvOCk=') }} + name: "Stateless tests (amd_coverage, 7/8)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5539,6 +4833,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 7/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5559,15 +4860,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'SQLLogic test' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 7/8)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - llvm_coverage: - runs-on: [self-hosted, amd-small] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, integration_tests_amd_llvm_coverage_1_5, integration_tests_amd_llvm_coverage_2_5, integration_tests_amd_llvm_coverage_3_5, integration_tests_amd_llvm_coverage_4_5, integration_tests_amd_llvm_coverage_5_5, stateless_tests_amd_llvm_coverage_1_3, stateless_tests_amd_llvm_coverage_2_3, stateless_tests_amd_llvm_coverage_3_3, stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_parallel, stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_sequential, stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_parallel, stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_sequential, 
stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_parallel, stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_sequential, unit_tests_amd_llvm_coverage] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'TExWTSBDb3ZlcmFnZQ==') }} - name: "LLVM Coverage" + stateless_tests_amd_coverage_8_8: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDgvOCk=') }} + name: "Stateless tests (amd_coverage, 8/8)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5577,6 +4878,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_coverage, 8/8)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5597,13 +4905,13 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'LLVM Coverage' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 8/8)' --workflow "MasterCI" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [ast_fuzzer_amd_debug, ast_fuzzer_amd_msan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_ubsan, ast_fuzzer_arm_asan, build_amd_asan, build_amd_binary, build_amd_compat, build_amd_darwin, build_amd_debug, build_amd_freebsd, build_amd_msan, build_amd_musl, build_amd_release, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_darwin, build_arm_fuzzers, build_arm_release, build_arm_tidy, build_arm_v80compat, build_llvm_coverage_build, build_loongarch64, build_ppc64le, build_riscv64, build_s390x, buzzhouse_amd_debug, buzzhouse_amd_msan, buzzhouse_amd_tsan, buzzhouse_amd_ubsan, buzzhouse_arm_asan, clickbench_amd_release, clickbench_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, config_workflow, docker_keeper_image, docker_server_image, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, install_packages_amd_release, install_packages_arm_release, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, 
integration_tests_amd_llvm_coverage_1_5, integration_tests_amd_llvm_coverage_2_5, integration_tests_amd_llvm_coverage_3_5, integration_tests_amd_llvm_coverage_4_5, integration_tests_amd_llvm_coverage_5_5, integration_tests_amd_msan_1_6, integration_tests_amd_msan_2_6, integration_tests_amd_msan_3_6, integration_tests_amd_msan_4_6, integration_tests_amd_msan_5_6, integration_tests_amd_msan_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, llvm_coverage, performance_comparison_amd_release_master_head_1_6, performance_comparison_amd_release_master_head_2_6, performance_comparison_amd_release_master_head_3_6, performance_comparison_amd_release_master_head_4_6, performance_comparison_amd_release_master_head_5_6, performance_comparison_amd_release_master_head_6_6, performance_comparison_arm_release_master_head_1_6, performance_comparison_arm_release_master_head_2_6, performance_comparison_arm_release_master_head_3_6, performance_comparison_arm_release_master_head_4_6, performance_comparison_arm_release_master_head_5_6, performance_comparison_arm_release_master_head_6_6, performance_comparison_arm_release_release_base_1_6, performance_comparison_arm_release_release_base_2_6, performance_comparison_arm_release_release_base_3_6, performance_comparison_arm_release_release_base_4_6, performance_comparison_arm_release_release_base_5_6, performance_comparison_arm_release_release_base_6_6, smoke_test_amd_darwin, sqllogic_test, sqltest, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, 
stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_llvm_coverage_1_3, stateless_tests_amd_llvm_coverage_2_3, stateless_tests_amd_llvm_coverage_3_3, stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_parallel, stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_sequential, stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_parallel, stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_sequential, stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_parallel, stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_sequential, stateless_tests_amd_msan_wasmedge_parallel_1_2, stateless_tests_amd_msan_wasmedge_parallel_2_2, stateless_tests_amd_msan_wasmedge_sequential_1_2, stateless_tests_amd_msan_wasmedge_sequential_2_2, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_arm_asan_azure_parallel, stateless_tests_arm_asan_azure_sequential, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, stress_test_amd_debug, stress_test_amd_msan, stress_test_amd_release, stress_test_amd_tsan, stress_test_amd_ubsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_azure_amd_msan, stress_test_azure_amd_tsan, unit_tests_amd_llvm_coverage, unit_tests_asan, unit_tests_msan, unit_tests_tsan, unit_tests_ubsan] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: 
[ast_fuzzer_amd_debug, ast_fuzzer_amd_msan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_ubsan, ast_fuzzer_arm_asan, build_amd_asan, build_amd_binary, build_amd_coverage, build_amd_debug, build_amd_msan, build_amd_release, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_release, buzzhouse_amd_debug, buzzhouse_amd_msan, buzzhouse_amd_tsan, buzzhouse_amd_ubsan, buzzhouse_arm_asan, clickbench_amd_release, clickbench_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, config_workflow, docker_keeper_image, docker_server_image, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, install_packages_amd_release, install_packages_arm_release, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_amd_msan_1_6, integration_tests_amd_msan_2_6, integration_tests_amd_msan_3_6, integration_tests_amd_msan_4_6, integration_tests_amd_msan_5_6, integration_tests_amd_msan_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, sqllogic_test, sqltest, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, 
stateless_tests_amd_coverage_1_8, stateless_tests_amd_coverage_2_8, stateless_tests_amd_coverage_3_8, stateless_tests_amd_coverage_4_8, stateless_tests_amd_coverage_5_8, stateless_tests_amd_coverage_6_8, stateless_tests_amd_coverage_7_8, stateless_tests_amd_coverage_8_8, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_msan_wasmedge_parallel_1_2, stateless_tests_amd_msan_wasmedge_parallel_2_2, stateless_tests_amd_msan_wasmedge_sequential_1_2, stateless_tests_amd_msan_wasmedge_sequential_2_2, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_arm_asan_azure_parallel, stateless_tests_arm_asan_azure_sequential, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, stress_test_amd_debug, stress_test_amd_msan, stress_test_amd_release, stress_test_amd_tsan, stress_test_amd_ubsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_azure_amd_msan, stress_test_azure_amd_tsan, unit_tests_asan, unit_tests_msan, unit_tests_tsan, unit_tests_ubsan] if: ${{ always() }} name: "Finish Workflow" outputs: @@ -5615,6 +4923,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5638,3 +4953,239 @@ jobs: PYTHONUNBUFFERED=1 python3 -m praktika run 'Finish Workflow' --workflow "MasterCI" --ci 
2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log + +########################################################################################## +##################################### ALTINITY JOBS ###################################### +########################################################################################## + + GrypeScanServer: + needs: [config_workflow, docker_server_image] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + strategy: + fail-fast: false + matrix: + suffix: ['', '-alpine'] + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-server + version: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + tag-suffix: ${{ matrix.suffix }} + GrypeScanKeeper: + needs: [config_workflow, docker_keeper_image] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-keeper + version: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + + RegressionTestsRelease: + needs: [config_workflow, build_amd_binary, stateless_tests_amd_debug_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.ci_exclude_tags, 'regression')}} + uses: ./.github/workflows/regression.yml + secrets: inherit + with: + runner_type: altinity-regression-tester + commit: 
c7897a6a858a9ef9c7b3c519e7291cfd3c2ec646 + arch: release + build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout_minutes: 210 + workflow_config: ${{ needs.config_workflow.outputs.data }} + RegressionTestsAarch64: + needs: [config_workflow, build_arm_binary, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.ci_exclude_tags, 'regression') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.custom_data.ci_exclude_tags, 'aarch64')}} + uses: ./.github/workflows/regression.yml + secrets: inherit + with: + runner_type: altinity-regression-tester-aarch64 + commit: c7897a6a858a9ef9c7b3c519e7291cfd3c2ec646 + arch: aarch64 + build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout_minutes: 210 + workflow_config: ${{ needs.config_workflow.outputs.data }} + + SignRelease: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_sign.yml + secrets: inherit + with: + test_name: Sign release + runner_type: altinity-style-checker + data: ${{ needs.config_workflow.outputs.data }} + SignAarch64: + needs: [config_workflow, build_arm_release] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_sign.yml + secrets: inherit + with: + test_name: Sign aarch64 + runner_type: altinity-style-checker-aarch64 + data: ${{ needs.config_workflow.outputs.data }} + + FinishCIReport: + if: ${{ !cancelled() }} + needs: + - config_workflow + - dockers_build_amd + - dockers_build_arm + - dockers_build_multiplatform_manifest + - build_amd_debug + - build_amd_asan + - build_amd_tsan + - build_amd_msan + - build_amd_ubsan + - build_amd_binary + - build_arm_asan + - build_arm_binary + - build_amd_coverage + - build_amd_release + - 
build_arm_release + - unit_tests_asan + - unit_tests_tsan + - unit_tests_msan + - unit_tests_ubsan + - docker_server_image + - docker_keeper_image + - install_packages_amd_release + - install_packages_arm_release + - compatibility_check_amd_release + - compatibility_check_arm_release + - stateless_tests_amd_asan_distributed_plan_parallel_1_2 + - stateless_tests_amd_asan_distributed_plan_parallel_2_2 + - stateless_tests_amd_asan_db_disk_distributed_plan_sequential + - stateless_tests_amd_debug_parallel + - stateless_tests_amd_debug_sequential + - stateless_tests_amd_tsan_parallel_1_2 + - stateless_tests_amd_tsan_parallel_2_2 + - stateless_tests_amd_tsan_sequential_1_2 + - stateless_tests_amd_tsan_sequential_2_2 + - stateless_tests_amd_msan_wasmedge_parallel_1_2 + - stateless_tests_amd_msan_wasmedge_parallel_2_2 + - stateless_tests_amd_msan_wasmedge_sequential_1_2 + - stateless_tests_amd_msan_wasmedge_sequential_2_2 + - stateless_tests_amd_ubsan_parallel + - stateless_tests_amd_ubsan_sequential + - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel + - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential + - stateless_tests_amd_tsan_s3_storage_parallel_1_2 + - stateless_tests_amd_tsan_s3_storage_parallel_2_2 + - stateless_tests_amd_tsan_s3_storage_sequential_1_2 + - stateless_tests_amd_tsan_s3_storage_sequential_2_2 + - stateless_tests_arm_binary_parallel + - stateless_tests_arm_binary_sequential + - stateless_tests_arm_asan_azure_parallel + - stateless_tests_arm_asan_azure_sequential + - integration_tests_amd_asan_db_disk_old_analyzer_1_6 + - integration_tests_amd_asan_db_disk_old_analyzer_2_6 + - integration_tests_amd_asan_db_disk_old_analyzer_3_6 + - integration_tests_amd_asan_db_disk_old_analyzer_4_6 + - integration_tests_amd_asan_db_disk_old_analyzer_5_6 + - integration_tests_amd_asan_db_disk_old_analyzer_6_6 + - integration_tests_amd_binary_1_5 + - integration_tests_amd_binary_2_5 + - integration_tests_amd_binary_3_5 + - 
integration_tests_amd_binary_4_5 + - integration_tests_amd_binary_5_5 + - integration_tests_arm_binary_distributed_plan_1_4 + - integration_tests_arm_binary_distributed_plan_2_4 + - integration_tests_arm_binary_distributed_plan_3_4 + - integration_tests_arm_binary_distributed_plan_4_4 + - integration_tests_amd_tsan_1_6 + - integration_tests_amd_tsan_2_6 + - integration_tests_amd_tsan_3_6 + - integration_tests_amd_tsan_4_6 + - integration_tests_amd_tsan_5_6 + - integration_tests_amd_tsan_6_6 + - integration_tests_amd_msan_1_6 + - integration_tests_amd_msan_2_6 + - integration_tests_amd_msan_3_6 + - integration_tests_amd_msan_4_6 + - integration_tests_amd_msan_5_6 + - integration_tests_amd_msan_6_6 + - stress_test_amd_release + - stress_test_amd_debug + - stress_test_amd_tsan + - stress_test_arm_asan + - stress_test_arm_asan_s3 + - stress_test_amd_ubsan + - stress_test_amd_msan + - stress_test_azure_amd_msan + - stress_test_azure_amd_tsan + - ast_fuzzer_amd_debug + - ast_fuzzer_arm_asan + - ast_fuzzer_amd_tsan + - ast_fuzzer_amd_msan + - ast_fuzzer_amd_ubsan + - buzzhouse_amd_debug + - buzzhouse_arm_asan + - buzzhouse_amd_tsan + - buzzhouse_amd_msan + - buzzhouse_amd_ubsan + - clickbench_amd_release + - clickbench_arm_release + - sqltest + - sqllogic_test + - stateless_tests_amd_coverage_1_8 + - stateless_tests_amd_coverage_2_8 + - stateless_tests_amd_coverage_3_8 + - stateless_tests_amd_coverage_4_8 + - stateless_tests_amd_coverage_5_8 + - stateless_tests_amd_coverage_6_8 + - stateless_tests_amd_coverage_7_8 + - stateless_tests_amd_coverage_8_8 + - finish_workflow + - GrypeScanServer + - GrypeScanKeeper + - RegressionTestsRelease + - RegressionTestsAarch64 + - SignRelease + - SignAarch64 + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + - name: Finalize workflow report + if: ${{ !cancelled() }} 
+ uses: ./.github/actions/create_workflow_report + with: + workflow_config: ${{ toJson(needs) }} + final: true + + SourceUpload: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + env: + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + VERSION: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }} + submodules: true + fetch-depth: 0 + filter: tree:0 + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + - name: Create source tar + run: | + cd .. && tar czf $RUNNER_TEMP/build_source.src.tar.gz ClickHouse/ + - name: Upload source tar + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release" + else + S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release" + fi + + aws s3 cp $RUNNER_TEMP/build_source.src.tar.gz s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml deleted file mode 100644 index 3e3660948b77..000000000000 --- a/.github/workflows/merge_queue.yml +++ /dev/null @@ -1,279 +0,0 @@ -# generated by praktika - -name: MergeQueueCI - -on: - merge_group: - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - CHECKOUT_REF: "" - - -jobs: - - config_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - 
name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Config Workflow' --workflow "MergeQueueCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_amd: - runs-on: [self-hosted, style-checker] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} - name: "Dockers Build (amd)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (amd)' --workflow "MergeQueueCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} - name: "Dockers Build (arm)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - style_check: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3R5bGUgY2hlY2s=') }} - name: "Style check" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - fast_test: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RmFzdCB0ZXN0') }} - name: "Fast test" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Fast test' --workflow "MergeQueueCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_amd_binary: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} - name: "Build (amd_binary)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_binary)' --workflow "MergeQueueCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm, fast_test, style_check] - if: ${{ always() }} - name: "Finish Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Finish Workflow' --workflow "MergeQueueCI" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/nightly_coverage.yml b/.github/workflows/nightly_coverage.yml deleted file mode 100644 index 68ff59059c1c..000000000000 --- a/.github/workflows/nightly_coverage.yml +++ /dev/null @@ -1,510 +0,0 @@ -# generated by praktika - -name: NightlyCoverage -on: - schedule: - - cron: 13 2 * * * - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }} - -env: - PYTHONUNBUFFERED: 1 - CHECKOUT_REF: "" - - -jobs: - - config_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Config Workflow' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_amd: - runs-on: [self-hosted, style-checker] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} - name: "Dockers Build (amd)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (amd)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} - name: "Dockers Build (arm)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (arm)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_amd_coverage: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9jb3ZlcmFnZSk=') }} - name: "Build (amd_coverage)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_coverage)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_coverage_1_8: - runs-on: [self-hosted, amd-small] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDEvOCk=') }} - name: "Stateless tests (amd_coverage, 1/8)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 1/8)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_coverage_2_8: - runs-on: [self-hosted, amd-small] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDIvOCk=') }} - name: "Stateless tests (amd_coverage, 2/8)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 2/8)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_coverage_3_8: - runs-on: [self-hosted, amd-small] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDMvOCk=') }} - name: "Stateless tests (amd_coverage, 3/8)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 3/8)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_coverage_4_8: - runs-on: [self-hosted, amd-small] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDQvOCk=') }} - name: "Stateless tests (amd_coverage, 4/8)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 4/8)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_coverage_5_8: - runs-on: [self-hosted, amd-small] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDUvOCk=') }} - name: "Stateless tests (amd_coverage, 5/8)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 5/8)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_coverage_6_8: - runs-on: [self-hosted, amd-small] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDYvOCk=') }} - name: "Stateless tests (amd_coverage, 6/8)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 6/8)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_coverage_7_8: - runs-on: [self-hosted, amd-small] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDcvOCk=') }} - name: "Stateless tests (amd_coverage, 7/8)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 7/8)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_coverage_8_8: - runs-on: [self-hosted, amd-small] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfY292ZXJhZ2UsIDgvOCk=') }} - name: "Stateless tests (amd_coverage, 8/8)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_coverage, 8/8)' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_amd_coverage, config_workflow, dockers_build_amd, dockers_build_arm, stateless_tests_amd_coverage_1_8, stateless_tests_amd_coverage_2_8, stateless_tests_amd_coverage_3_8, stateless_tests_amd_coverage_4_8, stateless_tests_amd_coverage_5_8, stateless_tests_amd_coverage_6_8, stateless_tests_amd_coverage_7_8, stateless_tests_amd_coverage_8_8] - if: ${{ always() }} - name: "Finish Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Finish Workflow' --workflow "NightlyCoverage" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/nightly_fuzzers.yml b/.github/workflows/nightly_fuzzers.yml deleted file mode 100644 index a01519a4b572..000000000000 --- a/.github/workflows/nightly_fuzzers.yml +++ /dev/null @@ -1,244 +0,0 @@ -# generated by praktika - -name: NightlyFuzzers -on: - schedule: - - cron: 13 3 * * * - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }} - -env: - PYTHONUNBUFFERED: 1 - CHECKOUT_REF: "" - - -jobs: - - config_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Config Workflow' --workflow "NightlyFuzzers" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_amd: - runs-on: [self-hosted, style-checker] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} - name: "Dockers Build (amd)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (amd)' --workflow "NightlyFuzzers" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} - name: "Dockers Build (arm)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (arm)' --workflow "NightlyFuzzers" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_arm_fuzzers: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9mdXp6ZXJzKQ==') }} - name: "Build (arm_fuzzers)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_fuzzers)' --workflow "NightlyFuzzers" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - libfuzzer_tests: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_fuzzers, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'bGliRnV6emVyIHRlc3Rz') }} - name: "libFuzzer tests" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'libFuzzer tests' --workflow "NightlyFuzzers" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_arm_fuzzers, config_workflow, dockers_build_amd, dockers_build_arm, libfuzzer_tests] - if: ${{ always() }} - name: "Finish Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Finish Workflow' --workflow "NightlyFuzzers" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/nightly_jepsen.yml b/.github/workflows/nightly_jepsen.yml deleted file mode 100644 index 99ebaa7d70ce..000000000000 --- a/.github/workflows/nightly_jepsen.yml +++ /dev/null @@ -1,244 +0,0 @@ -# generated by praktika - -name: NightlyJepsen -on: - schedule: - - cron: 13 4 * * * - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }} - -env: - PYTHONUNBUFFERED: 1 - CHECKOUT_REF: "" - - -jobs: - - config_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Config Workflow' --workflow "NightlyJepsen" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_amd: - runs-on: [self-hosted, style-checker] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} - name: "Dockers Build (amd)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (amd)' --workflow "NightlyJepsen" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} - name: "Dockers Build (arm)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (arm)' --workflow "NightlyJepsen" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_amd_binary: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} - name: "Build (amd_binary)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_binary)' --workflow "NightlyJepsen" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - clickhouse_keeper_jepsen: - runs-on: [self-hosted, style-checker] - needs: [build_amd_binary, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q2xpY2tIb3VzZSBLZWVwZXIgSmVwc2Vu') }} - name: "ClickHouse Keeper Jepsen" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'ClickHouse Keeper Jepsen' --workflow "NightlyJepsen" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_amd_binary, clickhouse_keeper_jepsen, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ always() }} - name: "Finish Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Finish Workflow' --workflow "NightlyJepsen" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/nightly_statistics.yml b/.github/workflows/nightly_statistics.yml deleted file mode 100644 index afeba342c136..000000000000 --- a/.github/workflows/nightly_statistics.yml +++ /dev/null @@ -1,91 +0,0 @@ -# generated by praktika - -name: NightlyStatistics -on: - schedule: - - cron: 13 5 * * * - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }} - -env: - PYTHONUNBUFFERED: 1 - CHECKOUT_REF: "" - - -jobs: - - config_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Config Workflow' --workflow "NightlyStatistics" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - collect_statistics: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - name: "Collect Statistics" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Collect Statistics' --workflow "NightlyStatistics" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/optimize_toolchain.yml b/.github/workflows/optimize_toolchain.yml deleted file mode 100644 index 80f61f17d778..000000000000 --- a/.github/workflows/optimize_toolchain.yml +++ /dev/null @@ -1,294 +0,0 @@ -# generated by praktika - -name: OptimizeToolchain -on: - workflow_dispatch: - inputs: - -env: - PYTHONUNBUFFERED: 1 - CHECKOUT_REF: "" - -# Allow updating GH commit statuses and PR comments to post an actual job reports link -permissions: write-all - -jobs: - - config_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_inputs.json << 'EOF' - ${{ toJson(github.event.inputs) }} - EOF - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Config Workflow' --workflow "OptimizeToolchain" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_amd: - runs-on: [self-hosted, style-checker] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} - name: "Dockers Build (amd)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_inputs.json << 'EOF' - ${{ toJson(github.event.inputs) }} - EOF - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (amd)' --workflow "OptimizeToolchain" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} - name: "Dockers Build (arm)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_inputs.json << 'EOF' - ${{ toJson(github.event.inputs) }} - EOF - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Dockers Build (arm)' --workflow "OptimizeToolchain" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_toolchain_pgo_bolt_amd64: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgVG9vbGNoYWluIChQR08sIEJPTFQpIChhbWQ2NCk=') }} - name: "Build Toolchain (PGO, BOLT) (amd64)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_inputs.json << 'EOF' - ${{ toJson(github.event.inputs) }} - EOF - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build Toolchain (PGO, BOLT) (amd64)' --workflow "OptimizeToolchain" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - build_toolchain_pgo_bolt_aarch64: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgVG9vbGNoYWluIChQR08sIEJPTFQpIChhYXJjaDY0KQ==') }} - name: "Build Toolchain (PGO, BOLT) (aarch64)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_inputs.json << 'EOF' - ${{ toJson(github.event.inputs) }} - EOF - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build Toolchain (PGO, BOLT) (aarch64)' --workflow "OptimizeToolchain" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - update_toolchain_dockerfile: - runs-on: [self-hosted, style-checker] - needs: [build_toolchain_pgo_bolt_aarch64, build_toolchain_pgo_bolt_amd64, config_workflow, dockers_build_amd, dockers_build_arm] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VXBkYXRlIFRvb2xjaGFpbiBEb2NrZXJmaWxl') }} - name: "Update Toolchain Dockerfile" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_inputs.json << 'EOF' - ${{ toJson(github.event.inputs) }} - EOF - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Update Toolchain Dockerfile' --workflow "OptimizeToolchain" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_toolchain_pgo_bolt_aarch64, build_toolchain_pgo_bolt_amd64, config_workflow, dockers_build_amd, dockers_build_arm, update_toolchain_dockerfile] - if: ${{ always() }} - name: "Finish Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - cat > ./ci/tmp/workflow_inputs.json << 'EOF' - ${{ toJson(github.event.inputs) }} - EOF - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Finish Workflow' --workflow "OptimizeToolchain" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 1ebe65165ca6..53b663f1c375 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -3,24 +3,45 @@ name: PR on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false pull_request: - branches: ['master'] + branches: ['antalya', 'releases/*', 'antalya-*'] env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 + GH_TOKEN: ${{ github.token }} DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }} - DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }} + DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }} CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + 
ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + -# Allow updating GH commit statuses and PR comments to post an actual job reports link -permissions: write-all jobs: config_workflow: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [] + if: ${{ github.repository == github.event.pull_request.head.repo.full_name || github.event_name == 'workflow_dispatch' }} name: "Config Workflow" outputs: data: ${{ steps.run.outputs.DATA }} @@ -31,6 +52,27 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + if: ${{ !failure() && env.AWS_ACCESS_KEY_ID && env.AWS_SECRET_ACCESS_KEY }} + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -56,7 +98,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log dockers_build_amd: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} name: "Dockers Build (amd)" @@ -69,6 +111,13 @@ 
jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -94,7 +143,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log dockers_build_arm: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] needs: [config_workflow] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} name: "Dockers Build (arm)" @@ -107,6 +156,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -132,7 +188,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log dockers_build_multiplatform_manifest: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] needs: [config_workflow, dockers_build_amd, dockers_build_arm] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAobXVsdGlwbGF0Zm9ybSBtYW5pZmVzdCk=') }} name: "Dockers Build (multiplatform manifest)" @@ -145,6 +201,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + 
test_name: "Dockers Build (multiplatform manifest)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -169,11 +232,11 @@ jobs: prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - style_check: - runs-on: [self-hosted, style-checker-aarch64] + fast_test: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3R5bGUgY2hlY2s=') }} - name: "Style check" + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RmFzdCB0ZXN0') }} + name: "Fast test" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -183,6 +246,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Fast test" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -203,15 +273,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Style check' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Fast test' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - code_review: - runs-on: [self-hosted, style-checker-aarch64] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29kZSBSZXZpZXc=') }} - name: "Code Review" + build_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} + name: "Build (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -221,6 +291,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -241,15 +318,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Code Review' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_debug)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - docs_check: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9jcyBjaGVjaw==') }} - name: "Docs check" + build_amd_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} + name: "Build (amd_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -259,6 +336,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -279,15 +363,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Docs check' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - fast_test: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RmFzdCB0ZXN0') }} - name: "Fast test" + build_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} + name: "Build (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -297,6 +381,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -317,15 +408,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Fast test' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_tidy: - runs-on: [self-hosted, arm-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV90aWR5KQ==') }} - name: "Build (arm_tidy)" + build_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }} + name: "Build (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -335,6 +426,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -355,15 +453,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_tidy)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_debug: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} - name: "Build (amd_debug)" + build_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }} + name: "Build (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -373,6 +471,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -393,15 +498,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_debug)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_asan: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} - name: "Build (amd_asan)" + build_amd_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} + name: "Build (amd_binary)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -411,6 +516,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -431,15 +543,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_binary)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_tsan: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} - name: "Build (amd_tsan)" + build_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} + name: "Build (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -449,6 +561,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -469,15 +588,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_msan: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }} - name: "Build (amd_msan)" + build_arm_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }} + name: "Build (arm_binary)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -487,6 +606,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_binary)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -507,15 +633,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_binary)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_ubsan: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }} - name: "Build (amd_ubsan)" + build_arm_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV90c2FuKQ==') }} + name: "Build (arm_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -525,6 +651,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -545,15 +678,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_binary: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} - name: "Build (amd_binary)" + build_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} + name: "Build (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -563,6 +696,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -583,15 +723,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_binary)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_asan: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} - name: "Build (arm_asan)" + build_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} + name: "Build (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -601,6 +741,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -621,15 +768,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_binary: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }} - name: "Build (arm_binary)" + quick_functional_tests: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UXVpY2sgZnVuY3Rpb25hbCB0ZXN0cw==') }} + name: "Quick functional tests" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -639,6 +786,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Quick functional tests" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -659,15 +813,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_binary)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Quick functional tests' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_tsan: - runs-on: [self-hosted, arm-large] - needs: [build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV90c2FuKQ==') }} - name: "Build (arm_tsan)" + stateless_tests_arm_asan_targeted: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgdGFyZ2V0ZWQp') }} + name: "Stateless tests (arm_asan, targeted)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -677,6 +831,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_asan, targeted)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -697,15 +858,15 @@ jobs: run: | 
. ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, targeted)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_release: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} - name: "Build (amd_release)" + integration_tests_amd_asan_targeted: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCB0YXJnZXRlZCk=') }} + name: "Integration tests (amd_asan, targeted)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} @@ -715,6 +876,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, targeted)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -735,15 +903,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, targeted)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_release: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} - name: "Build (arm_release)" + ast_fuzzer_amd_debug_targeted: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnLCB0YXJnZXRlZCk=') }} + name: "AST fuzzer (amd_debug, targeted)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -753,6 +921,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_debug, targeted)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -773,15 +948,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_debug, targeted)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_darwin: - runs-on: [self-hosted, amd-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }} - name: "Build (amd_darwin)" + ast_fuzzer_amd_debug_targeted_old_compatibility: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnLCB0YXJnZXRlZCwgb2xkX2NvbXBhdGliaWxpdHkp') }} + name: "AST fuzzer (amd_debug, targeted, old_compatibility)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -791,6 +966,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_debug, targeted, old_compatibility)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -811,15 +993,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_darwin)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_debug, targeted, old_compatibility)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_darwin: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }} - name: "Build (arm_darwin)" + stateless_tests_amd_asan_distributed_plan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" outputs: 
data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -829,6 +1011,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -849,15 +1038,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_darwin)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_v80compat: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }} - name: "Build (arm_v80compat)" + stateless_tests_amd_asan_distributed_plan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, 
dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -867,6 +1056,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -887,15 +1083,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_v80compat)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_freebsd: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, 
style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }} - name: "Build (amd_freebsd)" + stateless_tests_amd_asan_db_disk_distributed_plan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -905,6 +1101,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -925,15 +1128,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_freebsd)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_ppc64le: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }} - name: "Build (ppc64le)" + stateless_tests_amd_debug_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_debug, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} @@ -943,6 +1146,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -963,15 +1173,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (ppc64le)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_compat: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }} - name: "Build (amd_compat)" + stateless_tests_amd_debug_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, 
stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -981,6 +1191,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1001,15 +1218,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_compat)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_amd_musl: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, 
style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }} - name: "Build (amd_musl)" + stateless_tests_amd_tsan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1019,6 +1236,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1039,15 +1263,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (amd_musl)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_riscv64: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }} - name: "Build (riscv64)" + stateless_tests_amd_tsan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1057,6 +1281,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1077,15 +1308,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (riscv64)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_s390x: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }} - name: "Build (s390x)" + 
stateless_tests_amd_tsan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1095,6 +1326,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1115,15 +1353,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (s390x)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_loongarch64: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }} - name: "Build (loongarch64)" + stateless_tests_amd_tsan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1133,6 +1371,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1153,15 +1398,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (loongarch64)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_arm_fuzzers: - runs-on: [self-hosted, arm-large] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9mdXp6ZXJzKQ==') }} - name: "Build 
(arm_fuzzers)" + stateless_tests_amd_msan_wasmedge_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHBhcmFsbGVsLCAxLzIp') }} + name: "Stateless tests (amd_msan, WasmEdge, parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1171,6 +1416,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, WasmEdge, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1191,15 +1443,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (arm_fuzzers)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, parallel, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - build_llvm_coverage_build: - runs-on: [self-hosted, amd-large] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGxsdm1fY292ZXJhZ2VfYnVpbGQp') }} - name: "Build (llvm_coverage_build)" + stateless_tests_amd_msan_wasmedge_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHBhcmFsbGVsLCAyLzIp') }} + name: "Stateless tests (amd_msan, WasmEdge, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1209,6 +1461,13 @@ 
jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, WasmEdge, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1229,15 +1488,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Build (llvm_coverage_build)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, parallel, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - smoke_test_amd_darwin: - runs-on: [self-hosted, amd_macos_m1] - needs: [build_amd_asan, build_amd_darwin, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U21va2UgdGVzdCAoYW1kX2Rhcndpbik=') }} - name: "Smoke test (amd_darwin)" + stateless_tests_amd_msan_wasmedge_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, 
dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHNlcXVlbnRpYWwsIDEvMik=') }} + name: "Stateless tests (amd_msan, WasmEdge, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1247,6 +1506,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, WasmEdge, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1267,15 +1533,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Smoke test (amd_darwin)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, sequential, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - quick_functional_tests: - runs-on: [self-hosted, amd-small] - needs: [build_amd_debug, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UXVpY2sgZnVuY3Rpb25hbCB0ZXN0cw==') }} - name: "Quick functional tests" + stateless_tests_amd_msan_wasmedge_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHNlcXVlbnRpYWwsIDIvMik=') }} + name: "Stateless tests (amd_msan, WasmEdge, sequential, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} @@ -1285,6 +1551,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, WasmEdge, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1305,15 +1578,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Quick functional tests' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, sequential, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_arm_asan_targeted: - runs-on: [self-hosted, arm-medium] - needs: [build_arm_asan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgdGFyZ2V0ZWQp') }} - name: "Stateless tests (arm_asan, targeted)" + stateless_tests_amd_ubsan_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_ubsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_ubsan, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1323,6 +1596,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1343,15 +1623,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, targeted)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_asan_targeted: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCB0YXJnZXRlZCk=') }} - name: "Integration tests (amd_asan, targeted)" + stateless_tests_amd_ubsan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, 
build_amd_debug, build_amd_ubsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_ubsan, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1361,6 +1641,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1381,15 +1668,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, targeted)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_debug_targeted: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_debug, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnLCB0YXJnZXRlZCk=') }} - name: "AST fuzzer (amd_debug, targeted)" + stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" outputs: data: ${{ 
steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1399,6 +1686,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1419,15 +1713,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_debug, targeted)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_debug_targeted_old_compatibility: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_debug, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnLCB0YXJnZXRlZCwgb2xkX2NvbXBhdGliaWxpdHkp') }} - name: "AST fuzzer (amd_debug, targeted, old_compatibility)" + stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, 
stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1437,6 +1731,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1457,15 +1758,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_debug, targeted, old_compatibility)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_asan_flaky_check: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZmxha3kgY2hlY2sp') }} - name: "Stateless tests (amd_asan, flaky check)" + stateless_tests_amd_tsan_s3_storage_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, 
parallel, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1475,6 +1776,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1495,15 +1803,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_tsan_flaky_check: - runs-on: [self-hosted, amd-large] - needs: [build_amd_tsan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgZmxha3kgY2hlY2sp') }} - name: "Stateless tests (amd_tsan, flaky check)" + stateless_tests_amd_tsan_s3_storage_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, 
stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1513,6 +1821,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1533,15 +1848,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, flaky check)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_msan_flaky_check: - runs-on: [self-hosted, amd-large] - needs: [build_amd_msan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgZmxha3kgY2hlY2sp') }} - name: "Stateless tests (amd_msan, flaky check)" + stateless_tests_amd_tsan_s3_storage_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1551,6 +1866,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1571,15 +1893,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, flaky check)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_ubsan_flaky_check: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_ubsan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIGZsYWt5IGNoZWNrKQ==') }} - name: "Stateless tests (amd_ubsan, flaky check)" + stateless_tests_amd_tsan_s3_storage_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" 
outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1589,6 +1911,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1609,15 +1938,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, flaky check)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_debug_flaky_check: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_debug, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGZsYWt5IGNoZWNrKQ==') }} - name: "Stateless tests (amd_debug, flaky check)" + stateless_tests_arm_binary_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} + name: "Stateless tests (arm_binary, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1627,6 +1956,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1647,15 +1983,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, flaky check)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_binary_flaky_check: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBmbGFreSBjaGVjayk=') }} - name: "Stateless tests (amd_binary, flaky check)" + stateless_tests_arm_binary_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, 
dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (arm_binary, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1665,6 +2001,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1685,15 +2028,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_binary, flaky check)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - integration_tests_amd_asan_flaky: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBmbGFreSk=') }} - name: "Integration tests (amd_asan, flaky)" + stateless_tests_arm_asan_azure_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_asan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (arm_asan, azure, parallel)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: 
${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1703,6 +2046,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_asan, azure, parallel)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1723,15 +2073,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, flaky)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - bugfix_validation_functional_tests: - runs-on: [self-hosted, arm-medium] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGZ1bmN0aW9uYWwgdGVzdHMp') }} - name: "Bugfix validation (functional tests)" + stateless_tests_arm_asan_azure_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_asan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 
'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (arm_asan, azure, sequential)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1741,6 +2091,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_asan, azure, sequential)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1761,15 +2118,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - bugfix_validation_integration_tests: - runs-on: [self-hosted, amd-small-mem] - needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGludGVncmF0aW9uIHRlc3RzKQ==') }} - name: "Bugfix validation (integration tests)" + integration_tests_amd_asan_db_disk_old_analyzer_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, 
dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1779,6 +2136,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1799,15 +2163,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_asan_distributed_plan_parallel_1_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [build_amd_asan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + integration_tests_amd_asan_db_disk_old_analyzer_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} + name: 
"Integration tests (amd_asan, db disk, old analyzer, 2/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1817,6 +2181,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1837,15 +2208,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_asan_distributed_plan_parallel_2_2: - runs-on: [self-hosted, amd-medium-cpu] - needs: [build_amd_asan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + integration_tests_amd_asan_db_disk_old_analyzer_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, 
dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1855,6 +2226,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1875,15 +2253,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_asan_db_disk_distributed_plan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" + integration_tests_amd_asan_db_disk_old_analyzer_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, 
stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1893,6 +2271,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1913,15 +2298,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_parallel: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, 
stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgb2xkIGFuYWx5emVyLCBzMyBzdG9yYWdlLCBEYXRhYmFzZVJlcGxpY2F0ZWQsIFdhc21FZGdlLCBwYXJhbGxlbCk=') }} - name: "Stateless tests (amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, parallel)" + integration_tests_amd_asan_db_disk_old_analyzer_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1931,6 +2316,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1951,15 +2343,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgb2xkIGFuYWx5emVyLCBzMyBzdG9yYWdlLCBEYXRhYmFzZVJlcGxpY2F0ZWQsIFdhc21FZGdlLCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, sequential)" + integration_tests_amd_asan_db_disk_old_analyzer_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, 
dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -1969,6 +2361,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -1989,15 +2388,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgUGFyYWxsZWxSZXBsaWNhcywgczMgc3RvcmFnZSwgcGFyYWxsZWwp') }} - name: "Stateless tests (amd_llvm_coverage, ParallelReplicas, s3 storage, parallel)" + integration_tests_amd_binary_1_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, 
stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} + name: "Integration tests (amd_binary, 1/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2007,6 +2406,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 1/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2027,15 +2433,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, 
stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgUGFyYWxsZWxSZXBsaWNhcywgczMgc3RvcmFnZSwgc2VxdWVudGlhbCk=') }} - name: "Stateless tests (amd_llvm_coverage, ParallelReplicas, s3 storage, sequential)" + integration_tests_amd_binary_2_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} + name: "Integration tests (amd_binary, 2/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2045,6 +2451,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 2/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2065,15 +2478,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgQXN5bmNJbnNlcnQsIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_llvm_coverage, AsyncInsert, s3 storage, parallel)" + integration_tests_amd_binary_3_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, 
stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} + name: "Integration tests (amd_binary, 3/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2083,6 +2496,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 3/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2103,15 +2523,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, 
stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgQXN5bmNJbnNlcnQsIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_llvm_coverage, AsyncInsert, s3 storage, sequential)" + integration_tests_amd_binary_4_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} + name: "Integration tests (amd_binary, 4/5)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2121,6 +2541,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 4/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2141,15 +2568,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_debug_parallel: - runs-on: [self-hosted, amd-medium-cpu] - needs: [build_amd_debug, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug, parallel)" + integration_tests_amd_binary_5_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} + name: "Integration tests (amd_binary, 5/5)" outputs: data: ${{ steps.run.outputs.DATA }} 
pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2159,6 +2586,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 5/5)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2179,15 +2613,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_debug_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_debug, sequential)" + integration_tests_arm_binary_distributed_plan_1_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, 
build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 1/4)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2197,6 +2631,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 1/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2217,15 +2658,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_tsan_parallel_1_2: - runs-on: [self-hosted, amd-large] - needs: [build_amd_tsan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_tsan, parallel, 1/2)" + integration_tests_arm_binary_distributed_plan_2_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 2/4)" outputs: data: ${{ 
steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2235,6 +2676,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 2/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2255,15 +2703,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_tsan_parallel_2_2: - runs-on: [self-hosted, amd-large] - needs: [build_amd_tsan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_tsan, parallel, 2/2)" + integration_tests_arm_binary_distributed_plan_3_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, 
stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 3/4)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2273,6 +2721,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 3/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2293,15 +2748,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_tsan_sequential_1_2: - runs-on: [self-hosted, amd-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, 
style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, sequential, 1/2)" + integration_tests_arm_binary_distributed_plan_4_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 4/4)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2311,6 +2766,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 4/4)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2331,15 +2793,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_tsan_sequential_2_2: - runs-on: [self-hosted, amd-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, sequential, 2/2)" + integration_tests_amd_tsan_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') 
&& !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} + name: "Integration tests (amd_tsan, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2349,6 +2811,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 1/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2369,15 +2838,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_msan_wasmedge_parallel_1_2: - runs-on: [self-hosted, amd-large] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHBhcmFsbGVsLCAxLzIp') }} - name: "Stateless tests (amd_msan, WasmEdge, parallel, 1/2)" + integration_tests_amd_tsan_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} + name: "Integration tests (amd_tsan, 2/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2387,6 +2856,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 2/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2407,15 +2883,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, parallel, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_msan_wasmedge_parallel_2_2: - runs-on: [self-hosted, amd-large] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHBhcmFsbGVsLCAyLzIp') }} - name: "Stateless tests (amd_msan, WasmEdge, parallel, 2/2)" + integration_tests_amd_tsan_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} + name: "Integration tests (amd_tsan, 3/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2425,6 +2901,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 3/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2445,15 +2928,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, parallel, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_msan_wasmedge_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 
'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHNlcXVlbnRpYWwsIDEvMik=') }} - name: "Stateless tests (amd_msan, WasmEdge, sequential, 1/2)" + integration_tests_amd_tsan_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} + name: "Integration tests (amd_tsan, 4/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2463,6 +2946,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2483,15 +2973,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, sequential, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_msan_wasmedge_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgV2FzbUVkZ2UsIHNlcXVlbnRpYWwsIDIvMik=') }} - name: "Stateless tests (amd_msan, WasmEdge, sequential, 2/2)" + integration_tests_amd_tsan_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} + name: "Integration tests (amd_tsan, 5/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -2501,6 +2991,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -2521,1953 +3018,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_msan, WasmEdge, sequential, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stateless_tests_amd_ubsan_parallel: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_amd_ubsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_ubsan, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_ubsan_sequential: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_amd_ubsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_ubsan, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: - runs-on: [self-hosted, amd-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_s3_storage_parallel_1_2: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }} - name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_s3_storage_parallel_2_2: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} - name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_s3_storage_sequential_1_2: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_tsan_s3_storage_sequential_2_2: - runs-on: [self-hosted, amd-small-mem] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} - name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_arm_binary_parallel: - runs-on: [self-hosted, arm-medium-cpu] - needs: [build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} - name: "Stateless tests (arm_binary, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: 
actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_arm_binary_sequential: - runs-on: [self-hosted, arm-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} - name: "Stateless tests (arm_binary, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf 
./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_arm_asan_azure_parallel: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHBhcmFsbGVsKQ==') }} - name: "Stateless tests (arm_asan, azure, parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 
'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_arm_asan_azure_sequential: - runs-on: [self-hosted, arm-small-mem] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHNlcXVlbnRpYWwp') }} - name: "Stateless tests (arm_asan, azure, sequential)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > 
./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_1_3: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgMS8zKQ==') }} - name: "Stateless tests (amd_llvm_coverage, 1/3)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, 1/3)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_2_3: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgMi8zKQ==') }} - name: "Stateless tests (amd_llvm_coverage, 2/3)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, 2/3)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stateless_tests_amd_llvm_coverage_3_3: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbGx2bV9jb3ZlcmFnZSwgMy8zKQ==') }} - name: "Stateless tests (amd_llvm_coverage, 3/3)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stateless tests (amd_llvm_coverage, 3/3)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_asan_db_disk_old_analyzer_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} - name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_binary_1_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} - name: "Integration tests (amd_binary, 1/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_binary_2_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} - name: "Integration tests (amd_binary, 2/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_binary_3_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} - name: "Integration tests (amd_binary, 3/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_binary_4_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} - name: "Integration tests (amd_binary, 4/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_binary_5_5: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_binary, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} - name: "Integration tests (amd_binary, 5/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_arm_binary_distributed_plan_1_4: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 1/4)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_arm_binary_distributed_plan_2_4: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 2/4)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_arm_binary_distributed_plan_3_4: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 3/4)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_arm_binary_distributed_plan_4_4: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }} - name: "Integration tests (arm_binary, distributed plan, 4/4)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} - name: "Integration tests (amd_tsan, 1/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} - name: "Integration tests (amd_tsan, 2/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} - name: "Integration tests (amd_tsan, 3/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} - name: "Integration tests (amd_tsan, 4/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} - name: "Integration tests (amd_tsan, 5/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_tsan_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} - name: "Integration tests (amd_tsan, 6/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_msan_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAxLzYp') }} - name: "Integration tests (amd_msan, 1/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 1/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_msan_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAyLzYp') }} - name: "Integration tests (amd_msan, 2/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 2/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_msan_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAzLzYp') }} - name: "Integration tests (amd_msan, 3/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 3/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_msan_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA0LzYp') }} - name: "Integration tests (amd_msan, 4/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 4/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_msan_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA1LzYp') }} - name: "Integration tests (amd_msan, 5/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 5/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_msan_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA2LzYp') }} - name: "Integration tests (amd_msan, 6/6)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 6/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_llvm_coverage_1_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCAxLzUp') }} - name: "Integration tests (amd_llvm_coverage, 1/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 1/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_llvm_coverage_2_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCAyLzUp') }} - name: "Integration tests (amd_llvm_coverage, 2/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 2/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_llvm_coverage_3_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCAzLzUp') }} - name: "Integration tests (amd_llvm_coverage, 3/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 3/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_llvm_coverage_4_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCA0LzUp') }} - name: "Integration tests (amd_llvm_coverage, 4/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 4/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - integration_tests_amd_llvm_coverage_5_5: - runs-on: [self-hosted, amd-medium] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9sbHZtX2NvdmVyYWdlLCA1LzUp') }} - name: "Integration tests (amd_llvm_coverage, 5/5)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_llvm_coverage, 5/5)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - unit_tests_asan: - runs-on: [self-hosted, amd-large] - needs: [build_amd_asan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} - name: "Unit tests (asan)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - unit_tests_tsan: - runs-on: [self-hosted, amd-large] - needs: [build_amd_tsan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} - name: "Unit tests (tsan)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - unit_tests_msan: - runs-on: [self-hosted, amd-large] - needs: [build_amd_msan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} - name: "Unit tests (msan)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - unit_tests_ubsan: - runs-on: [self-hosted, amd-large] - needs: [build_amd_ubsan, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} - name: "Unit tests (ubsan)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 - with: - ref: ${{ env.CHECKOUT_REF }} - - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - unit_tests_amd_llvm_coverage: - runs-on: [self-hosted, amd-large] - needs: [build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYW1kX2xsdm1fY292ZXJhZ2Up') }} - name: "Unit tests (amd_llvm_coverage)" + integration_tests_amd_tsan_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} + name: "Integration tests (amd_tsan, 6/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4477,43 +3036,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 
'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (amd_llvm_coverage)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - docker_server_image: - runs-on: [self-hosted, style-checker] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} - name: "Docker server image" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Integration tests (amd_tsan, 6/6)" - name: Prepare env script run: | @@ -4535,15 +3063,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker server image' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - docker_keeper_image: - runs-on: [self-hosted, style-checker] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} - name: "Docker keeper image" + integration_tests_amd_msan_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAxLzYp') }} + name: "Integration tests (amd_msan, 1/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4553,43 +3081,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - install_packages_amd_release: - runs-on: [self-hosted, style-checker] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} - name: "Install packages 
(amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Integration tests (amd_msan, 1/6)" - name: Prepare env script run: | @@ -4611,15 +3108,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 1/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - install_packages_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} - name: "Install packages (arm_release)" + integration_tests_amd_msan_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: 
[build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAyLzYp') }} + name: "Integration tests (amd_msan, 2/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4629,43 +3126,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . 
./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (arm_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - compatibility_check_amd_release: - runs-on: [self-hosted, style-checker] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} - name: "Compatibility check (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Integration tests (amd_msan, 2/6)" - name: Prepare env script run: | @@ -4687,15 +3153,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 2/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - compatibility_check_arm_release: - runs-on: [self-hosted, style-checker-aarch64] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} - name: "Compatibility check (arm_release)" + integration_tests_amd_msan_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && 
!contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCAzLzYp') }} + name: "Integration tests (amd_msan, 3/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4705,43 +3171,12 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} - - name: Prepare env script - run: | - rm -rf ./ci/tmp - mkdir -p ./ci/tmp - cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:.: - - cat > ./ci/tmp/workflow_job.json << 'EOF' - ${{ toJson(job) }} - EOF - cat > ./ci/tmp/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - - name: Run - id: run - run: | - . ./ci/tmp/praktika_setup_env.sh - set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (arm_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime - prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") - for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - - stress_test_amd_release: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RyZXNzIHRlc3QgKGFtZF9yZWxlYXNlKQ==') }} - name: "Stress test (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} - steps: - - name: Checkout code - uses: actions/checkout@v6 + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup with: - ref: ${{ env.CHECKOUT_REF }} + test_name: "Integration tests (amd_msan, 3/6)" - name: Prepare env script run: | @@ -4763,15 +3198,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 3/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }} - name: "Stress test (amd_debug)" + integration_tests_amd_msan_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: 
[build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA0LzYp') }} + name: "Integration tests (amd_msan, 4/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4781,6 +3216,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 4/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4801,15 +3243,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 4/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} - name: "Stress test (amd_tsan)" + integration_tests_amd_msan_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA1LzYp') }} + name: "Integration tests (amd_msan, 5/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4819,6 +3261,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 5/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4839,15 +3288,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 5/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_arm_asan: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }} - name: "Stress test (arm_asan)" + 
integration_tests_amd_msan_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9tc2FuLCA2LzYp') }} + name: "Integration tests (amd_msan, 6/6)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4857,6 +3306,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_msan, 6/6)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4877,15 +3333,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (arm_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Integration tests (amd_msan, 6/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_arm_asan_s3: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }} - name: "Stress test (arm_asan, s3)" + unit_tests_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} + name: "Unit tests (asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} 
@@ -4895,6 +3351,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4915,15 +3378,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_amd_ubsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }} - name: "Stress test (amd_ubsan)" + unit_tests_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_tsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && 
!contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} + name: "Unit tests (tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4933,6 +3396,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4953,15 +3423,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - stress_test_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }} - name: "Stress test (amd_msan)" + 
unit_tests_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_msan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} + name: "Unit tests (msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -4971,6 +3441,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -4991,15 +3468,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - upgrade_check_amd_release: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ 
!cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX3JlbGVhc2Up') }} - name: "Upgrade check (amd_release)" + unit_tests_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_ubsan, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} + name: "Unit tests (ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5009,6 +3486,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5029,15 +3513,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Upgrade check (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }} - name: "AST fuzzer (amd_debug)" + docker_server_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [build_amd_release, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + name: "Docker server image" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ 
-5047,6 +3531,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5067,15 +3558,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker server image' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_arm_asan: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }} - name: "AST fuzzer (arm_asan)" + docker_keeper_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [build_amd_release, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && 
!contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + name: "Docker keeper image" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5085,6 +3576,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5105,15 +3603,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }} - name: "AST fuzzer (amd_tsan)" + 
install_packages_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_release, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} + name: "Install packages (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5123,6 +3621,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5143,15 +3648,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }} - name: "AST fuzzer (amd_msan)" + install_packages_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} + name: "Install packages (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5161,6 +3666,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5181,15 +3693,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Install packages (arm_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - ast_fuzzer_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_amd_ubsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }} - name: "AST fuzzer (amd_ubsan)" + 
compatibility_check_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [build_amd_asan, build_amd_debug, build_amd_release, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} + name: "Compatibility check (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5199,6 +3711,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5219,15 +3738,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_amd_debug: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }} - name: "BuzzHouse (amd_debug)" + compatibility_check_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} + name: "Compatibility check (arm_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5237,6 +3756,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (arm_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5257,15 +3783,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Compatibility check (arm_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_arm_asan: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_asan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }} - name: "BuzzHouse (arm_asan)" + 
stress_test_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_release, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9yZWxlYXNlKQ==') }} + name: "Stress test (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5275,6 +3801,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_release)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5295,15 +3828,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_release)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_amd_tsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }} - name: "BuzzHouse (amd_tsan)" + stress_test_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }} + name: "Stress test (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5313,6 +3846,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5333,15 +3873,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_amd_msan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }} - name: "BuzzHouse (amd_msan)" + stress_test_amd_tsan: + runs-on: [self-hosted, 
altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }} + name: "Stress test (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5351,6 +3891,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5371,15 +3918,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - buzzhouse_amd_ubsan: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_amd_ubsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }} - name: "BuzzHouse (amd_ubsan)" + stress_test_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_asan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }} + name: "Stress test (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5389,6 +3936,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5409,15 +3963,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (arm_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_1_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzYp') }} - name: 
"Performance Comparison (amd_release, master_head, 1/6)" + stress_test_arm_asan_s3: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_asan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuLCBzMyk=') }} + name: "Stress test (arm_asan, s3)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5427,6 +3981,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (arm_asan, s3)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5447,15 +4008,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (arm_asan, s3)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_2_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzYp') }} - name: "Performance Comparison (amd_release, master_head, 2/6)" + stress_test_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_ubsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }} + name: "Stress test (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5465,6 +4026,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5485,15 +4053,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_3_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzYp') }} - name: "Performance Comparison (amd_release, master_head, 3/6)" + stress_test_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }} + name: "Stress test (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5503,6 +4071,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stress test (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5523,15 +4098,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_4_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA0LzYp') }} - name: "Performance Comparison (amd_release, master_head, 4/6)" + ast_fuzzer_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 
'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }} + name: "AST fuzzer (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5541,6 +4116,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5561,15 +4143,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 4/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_5_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA1LzYp') }} - name: "Performance Comparison (amd_release, master_head, 5/6)" + ast_fuzzer_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_asan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }} + name: "AST fuzzer (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5579,6 +4161,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5599,15 +4188,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 5/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_amd_release_master_head_6_6: - runs-on: [self-hosted, amd-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_release, build_amd_tsan, build_arm_binary, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA2LzYp') }} - name: "Performance Comparison (amd_release, master_head, 6/6)" + ast_fuzzer_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }} + name: "AST fuzzer (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5617,6 +4206,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5637,15 +4233,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (amd_release, master_head, 6/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_1_6: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzYp') }} - name: "Performance Comparison (arm_release, master_head, 1/6)" + ast_fuzzer_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }} + name: "AST fuzzer (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5655,6 +4251,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5675,15 +4278,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_2_6: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzYp') }} - name: "Performance Comparison (arm_release, master_head, 2/6)" + ast_fuzzer_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_ubsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }} + name: "AST fuzzer (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5693,6 +4296,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "AST fuzzer (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5713,15 +4323,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_3_6: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzYp') }} - name: "Performance Comparison (arm_release, master_head, 3/6)" + buzzhouse_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }} + name: "BuzzHouse (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5731,6 +4341,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_debug)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5751,15 +4368,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_4_6: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA0LzYp') }} - name: "Performance Comparison (arm_release, master_head, 4/6)" + buzzhouse_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_asan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }} + name: "BuzzHouse (arm_asan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5769,6 +4386,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (arm_asan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5789,15 +4413,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 4/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_5_6: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA1LzYp') }} - name: "Performance Comparison (arm_release, master_head, 5/6)" + buzzhouse_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }} + name: "BuzzHouse (amd_tsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5807,6 +4431,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_tsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5827,15 +4458,15 @@ jobs: run: | . 
./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 5/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - performance_comparison_arm_release_master_head_6_6: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCA2LzYp') }} - name: "Performance Comparison (arm_release, master_head, 6/6)" + buzzhouse_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_msan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && 
!contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }} + name: "BuzzHouse (amd_msan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5845,6 +4476,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "BuzzHouse (amd_msan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5865,15 +4503,15 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'Performance Comparison (arm_release, master_head, 6/6)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log - llvm_coverage: - runs-on: [self-hosted, amd-small] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_tidy, build_llvm_coverage_build, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, integration_tests_amd_llvm_coverage_1_5, integration_tests_amd_llvm_coverage_2_5, integration_tests_amd_llvm_coverage_3_5, integration_tests_amd_llvm_coverage_4_5, integration_tests_amd_llvm_coverage_5_5, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_llvm_coverage_1_3, stateless_tests_amd_llvm_coverage_2_3, stateless_tests_amd_llvm_coverage_3_3, 
stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_parallel, stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_sequential, stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_parallel, stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_sequential, stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_parallel, stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check, unit_tests_amd_llvm_coverage] - if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'TExWTSBDb3ZlcmFnZQ==') }} - name: "LLVM Coverage" + buzzhouse_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [build_amd_asan, build_amd_debug, build_amd_ubsan, build_arm_binary, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }} + name: "BuzzHouse (amd_ubsan)" outputs: data: ${{ steps.run.outputs.DATA }} pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} @@ -5883,6 +4521,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: 
"BuzzHouse (amd_ubsan)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5903,13 +4548,13 @@ jobs: run: | . ./ci/tmp/praktika_setup_env.sh set -o pipefail - PYTHONUNBUFFERED=1 python3 -m praktika run 'LLVM Coverage' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime + PYTHONUNBUFFERED=1 python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log sqllogic_test: - runs-on: [self-hosted, arm-medium] - needs: [build_amd_asan, build_amd_debug, build_amd_tsan, build_arm_binary, build_arm_release, build_arm_tidy, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_arm_binary_parallel, style_check] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [build_amd_asan, build_amd_debug, build_arm_binary, build_arm_release, config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U1FMTG9naWMgdGVzdA==') }} name: "SQLLogic test" outputs: @@ -5921,6 +4566,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - 
name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "SQLLogic test" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5946,7 +4598,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_toolchain_pgo_bolt_amd64: - runs-on: [self-hosted, amd-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgVG9vbGNoYWluIChQR08sIEJPTFQpIChhbWQ2NCk=') }} name: "Build Toolchain (PGO, BOLT) (amd64)" @@ -5959,6 +4611,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build Toolchain (PGO, BOLT) (amd64)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -5984,7 +4643,7 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log build_toolchain_pgo_bolt_aarch64: - runs-on: [self-hosted, arm-large] + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgVG9vbGNoYWluIChQR08sIEJPTFQpIChhYXJjaDY0KQ==') }} name: "Build Toolchain (PGO, BOLT) (aarch64)" @@ -5997,6 +4656,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker 
setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build Toolchain (PGO, BOLT) (aarch64)" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -6022,8 +4688,8 @@ jobs: for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log finish_workflow: - runs-on: [self-hosted, style-checker-aarch64] - needs: [ast_fuzzer_amd_debug, ast_fuzzer_amd_debug_targeted, ast_fuzzer_amd_debug_targeted_old_compatibility, ast_fuzzer_amd_msan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_ubsan, ast_fuzzer_arm_asan, bugfix_validation_functional_tests, bugfix_validation_integration_tests, build_amd_asan, build_amd_binary, build_amd_compat, build_amd_darwin, build_amd_debug, build_amd_freebsd, build_amd_msan, build_amd_musl, build_amd_release, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_darwin, build_arm_fuzzers, build_arm_release, build_arm_tidy, build_arm_tsan, build_arm_v80compat, build_llvm_coverage_build, build_loongarch64, build_ppc64le, build_riscv64, build_s390x, build_toolchain_pgo_bolt_aarch64, build_toolchain_pgo_bolt_amd64, buzzhouse_amd_debug, buzzhouse_amd_msan, buzzhouse_amd_tsan, buzzhouse_amd_ubsan, buzzhouse_arm_asan, code_review, compatibility_check_amd_release, compatibility_check_arm_release, config_workflow, docker_keeper_image, docker_server_image, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, docs_check, fast_test, install_packages_amd_release, install_packages_arm_release, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_asan_flaky, integration_tests_amd_asan_targeted, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, 
integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_amd_llvm_coverage_1_5, integration_tests_amd_llvm_coverage_2_5, integration_tests_amd_llvm_coverage_3_5, integration_tests_amd_llvm_coverage_4_5, integration_tests_amd_llvm_coverage_5_5, integration_tests_amd_msan_1_6, integration_tests_amd_msan_2_6, integration_tests_amd_msan_3_6, integration_tests_amd_msan_4_6, integration_tests_amd_msan_5_6, integration_tests_amd_msan_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, llvm_coverage, performance_comparison_amd_release_master_head_1_6, performance_comparison_amd_release_master_head_2_6, performance_comparison_amd_release_master_head_3_6, performance_comparison_amd_release_master_head_4_6, performance_comparison_amd_release_master_head_5_6, performance_comparison_amd_release_master_head_6_6, performance_comparison_arm_release_master_head_1_6, performance_comparison_arm_release_master_head_2_6, performance_comparison_arm_release_master_head_3_6, performance_comparison_arm_release_master_head_4_6, performance_comparison_arm_release_master_head_5_6, performance_comparison_arm_release_master_head_6_6, quick_functional_tests, smoke_test_amd_darwin, sqllogic_test, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_flaky_check, stateless_tests_amd_binary_flaky_check, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, 
stateless_tests_amd_debug_flaky_check, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_llvm_coverage_1_3, stateless_tests_amd_llvm_coverage_2_3, stateless_tests_amd_llvm_coverage_3_3, stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_parallel, stateless_tests_amd_llvm_coverage_asyncinsert_s3_storage_sequential, stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_parallel, stateless_tests_amd_llvm_coverage_old_analyzer_s3_storage_databasereplicated_wasmedge_sequential, stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_parallel, stateless_tests_amd_llvm_coverage_parallelreplicas_s3_storage_sequential, stateless_tests_amd_msan_flaky_check, stateless_tests_amd_msan_wasmedge_parallel_1_2, stateless_tests_amd_msan_wasmedge_parallel_2_2, stateless_tests_amd_msan_wasmedge_sequential_1_2, stateless_tests_amd_msan_wasmedge_sequential_2_2, stateless_tests_amd_tsan_flaky_check, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_ubsan_flaky_check, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_arm_asan_azure_parallel, stateless_tests_arm_asan_azure_sequential, stateless_tests_arm_asan_targeted, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, stress_test_amd_debug, stress_test_amd_msan, stress_test_amd_release, stress_test_amd_tsan, stress_test_amd_ubsan, stress_test_arm_asan, stress_test_arm_asan_s3, style_check, unit_tests_amd_llvm_coverage, unit_tests_asan, unit_tests_msan, unit_tests_tsan, unit_tests_ubsan, upgrade_check_amd_release] + runs-on: [self-hosted, altinity-on-demand, 
altinity-style-checker-aarch64] + needs: [ast_fuzzer_amd_debug, ast_fuzzer_amd_debug_targeted, ast_fuzzer_amd_debug_targeted_old_compatibility, ast_fuzzer_amd_msan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_ubsan, ast_fuzzer_arm_asan, build_amd_asan, build_amd_binary, build_amd_debug, build_amd_msan, build_amd_release, build_amd_tsan, build_amd_ubsan, build_arm_asan, build_arm_binary, build_arm_release, build_arm_tsan, build_toolchain_pgo_bolt_aarch64, build_toolchain_pgo_bolt_amd64, buzzhouse_amd_debug, buzzhouse_amd_msan, buzzhouse_amd_tsan, buzzhouse_amd_ubsan, buzzhouse_arm_asan, compatibility_check_amd_release, compatibility_check_arm_release, config_workflow, docker_keeper_image, docker_server_image, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, install_packages_amd_release, install_packages_arm_release, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_asan_targeted, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_amd_msan_1_6, integration_tests_amd_msan_2_6, integration_tests_amd_msan_3_6, integration_tests_amd_msan_4_6, integration_tests_amd_msan_5_6, integration_tests_amd_msan_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, quick_functional_tests, 
sqllogic_test, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_msan_wasmedge_parallel_1_2, stateless_tests_amd_msan_wasmedge_parallel_2_2, stateless_tests_amd_msan_wasmedge_sequential_1_2, stateless_tests_amd_msan_wasmedge_sequential_2_2, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_arm_asan_azure_parallel, stateless_tests_arm_asan_azure_sequential, stateless_tests_arm_asan_targeted, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, stress_test_amd_debug, stress_test_amd_msan, stress_test_amd_release, stress_test_amd_tsan, stress_test_amd_ubsan, stress_test_arm_asan, stress_test_arm_asan_s3, unit_tests_asan, unit_tests_msan, unit_tests_tsan, unit_tests_ubsan] if: ${{ always() }} name: "Finish Workflow" outputs: @@ -6035,6 +4701,13 @@ jobs: with: ref: ${{ env.CHECKOUT_REF }} + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + - name: Prepare env script run: | rm -rf ./ci/tmp @@ -6058,3 +4731,213 @@ jobs: PYTHONUNBUFFERED=1 python3 -m praktika run 'Finish Workflow' --workflow "PR" --ci 2>&1 | python3 -u -c 'import sys,datetime prefix=lambda: datetime.datetime.now().strftime("[%Y-%m-%d 
%H:%M:%S]") for line in sys.stdin: sys.stdout.write(prefix() + " " + line); sys.stdout.flush()' | tee ./ci/tmp/job.log + +########################################################################################## +##################################### ALTINITY JOBS ###################################### +########################################################################################## + + GrypeScanServer: + needs: [config_workflow, docker_server_image] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + strategy: + fail-fast: false + matrix: + suffix: ['', '-alpine'] + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-server + version: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + tag-suffix: ${{ matrix.suffix }} + GrypeScanKeeper: + needs: [config_workflow, docker_keeper_image] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-keeper + version: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + + RegressionTestsRelease: + needs: [config_workflow, build_amd_binary, stateless_tests_amd_debug_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.ci_exclude_tags, 'regression')}} + uses: ./.github/workflows/regression.yml + secrets: inherit + with: + runner_type: altinity-regression-tester + commit: c7897a6a858a9ef9c7b3c519e7291cfd3c2ec646 + arch: release + build_sha: ${{ github.event_name == 'pull_request' && 
github.event.pull_request.head.sha || github.sha }} + timeout_minutes: 210 + workflow_config: ${{ needs.config_workflow.outputs.data }} + RegressionTestsAarch64: + needs: [config_workflow, build_arm_binary, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.ci_exclude_tags, 'regression') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.custom_data.ci_exclude_tags, 'aarch64')}} + uses: ./.github/workflows/regression.yml + secrets: inherit + with: + runner_type: altinity-regression-tester-aarch64 + commit: c7897a6a858a9ef9c7b3c519e7291cfd3c2ec646 + arch: aarch64 + build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + timeout_minutes: 210 + workflow_config: ${{ needs.config_workflow.outputs.data }} + + FinishCIReport: + if: ${{ !cancelled() }} + needs: + - config_workflow + - dockers_build_amd + - dockers_build_arm + - dockers_build_multiplatform_manifest + - fast_test + - build_amd_debug + - build_amd_asan + - build_amd_tsan + - build_amd_msan + - build_amd_ubsan + - build_amd_binary + - build_arm_asan + - build_arm_binary + - build_arm_tsan + - build_amd_release + - build_arm_release + - quick_functional_tests + - stateless_tests_arm_asan_targeted + - integration_tests_amd_asan_targeted + - ast_fuzzer_amd_debug_targeted + - ast_fuzzer_amd_debug_targeted_old_compatibility + - stateless_tests_amd_asan_distributed_plan_parallel_1_2 + - stateless_tests_amd_asan_distributed_plan_parallel_2_2 + - stateless_tests_amd_asan_db_disk_distributed_plan_sequential + - stateless_tests_amd_debug_parallel + - stateless_tests_amd_debug_sequential + - stateless_tests_amd_tsan_parallel_1_2 + - stateless_tests_amd_tsan_parallel_2_2 + - stateless_tests_amd_tsan_sequential_1_2 + - stateless_tests_amd_tsan_sequential_2_2 + - stateless_tests_amd_msan_wasmedge_parallel_1_2 + - 
stateless_tests_amd_msan_wasmedge_parallel_2_2 + - stateless_tests_amd_msan_wasmedge_sequential_1_2 + - stateless_tests_amd_msan_wasmedge_sequential_2_2 + - stateless_tests_amd_ubsan_parallel + - stateless_tests_amd_ubsan_sequential + - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel + - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential + - stateless_tests_amd_tsan_s3_storage_parallel_1_2 + - stateless_tests_amd_tsan_s3_storage_parallel_2_2 + - stateless_tests_amd_tsan_s3_storage_sequential_1_2 + - stateless_tests_amd_tsan_s3_storage_sequential_2_2 + - stateless_tests_arm_binary_parallel + - stateless_tests_arm_binary_sequential + - stateless_tests_arm_asan_azure_parallel + - stateless_tests_arm_asan_azure_sequential + - integration_tests_amd_asan_db_disk_old_analyzer_1_6 + - integration_tests_amd_asan_db_disk_old_analyzer_2_6 + - integration_tests_amd_asan_db_disk_old_analyzer_3_6 + - integration_tests_amd_asan_db_disk_old_analyzer_4_6 + - integration_tests_amd_asan_db_disk_old_analyzer_5_6 + - integration_tests_amd_asan_db_disk_old_analyzer_6_6 + - integration_tests_amd_binary_1_5 + - integration_tests_amd_binary_2_5 + - integration_tests_amd_binary_3_5 + - integration_tests_amd_binary_4_5 + - integration_tests_amd_binary_5_5 + - integration_tests_arm_binary_distributed_plan_1_4 + - integration_tests_arm_binary_distributed_plan_2_4 + - integration_tests_arm_binary_distributed_plan_3_4 + - integration_tests_arm_binary_distributed_plan_4_4 + - integration_tests_amd_tsan_1_6 + - integration_tests_amd_tsan_2_6 + - integration_tests_amd_tsan_3_6 + - integration_tests_amd_tsan_4_6 + - integration_tests_amd_tsan_5_6 + - integration_tests_amd_tsan_6_6 + - integration_tests_amd_msan_1_6 + - integration_tests_amd_msan_2_6 + - integration_tests_amd_msan_3_6 + - integration_tests_amd_msan_4_6 + - integration_tests_amd_msan_5_6 + - integration_tests_amd_msan_6_6 + - unit_tests_asan + - unit_tests_tsan + - unit_tests_msan + - unit_tests_ubsan + - 
docker_server_image + - docker_keeper_image + - install_packages_amd_release + - install_packages_arm_release + - compatibility_check_amd_release + - compatibility_check_arm_release + - stress_test_amd_release + - stress_test_amd_debug + - stress_test_amd_tsan + - stress_test_arm_asan + - stress_test_arm_asan_s3 + - stress_test_amd_ubsan + - stress_test_amd_msan + - ast_fuzzer_amd_debug + - ast_fuzzer_arm_asan + - ast_fuzzer_amd_tsan + - ast_fuzzer_amd_msan + - ast_fuzzer_amd_ubsan + - buzzhouse_amd_debug + - buzzhouse_arm_asan + - buzzhouse_amd_tsan + - buzzhouse_amd_msan + - buzzhouse_amd_ubsan + - sqllogic_test + - build_toolchain_pgo_bolt_amd64 + - build_toolchain_pgo_bolt_aarch64 + - finish_workflow + - GrypeScanServer + - GrypeScanKeeper + - RegressionTestsRelease + - RegressionTestsAarch64 + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + - name: Finalize workflow report + if: ${{ !cancelled() }} + uses: ./.github/actions/create_workflow_report + with: + workflow_config: ${{ toJson(needs) }} + final: true + + SourceUpload: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + env: + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + VERSION: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }} + submodules: true + fetch-depth: 0 + filter: tree:0 + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 
2 + arch: arm64 + - name: Create source tar + run: | + cd .. && tar czf $RUNNER_TEMP/build_source.src.tar.gz ClickHouse/ + - name: Upload source tar + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release" + else + S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release" + fi + + aws s3 cp $RUNNER_TEMP/build_source.src.tar.gz s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz diff --git a/.github/workflows/pull_request_community.yml b/.github/workflows/pull_request_community.yml new file mode 100644 index 000000000000..76f83b6efd30 --- /dev/null +++ b/.github/workflows/pull_request_community.yml @@ -0,0 +1,4012 @@ +# generated by praktika + +name: Community PR + +on: + workflow_dispatch: + inputs: + no_cache: + description: Run without cache + required: false + type: boolean + default: false + pull_request: + branches: ['antalya', 'releases/*', 'antalya-*'] + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + GH_TOKEN: ${{ github.token }} + DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }} + DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }} + CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }} + + + +jobs: + + config_workflow: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [] + if: ${{ github.repository != github.event.pull_request.head.repo.full_name }} + name: "Config Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + if: ${{ !failure() && env.AWS_ACCESS_KEY_ID && 
env.AWS_SECRET_ACCESS_KEY }} + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Config Workflow' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Config Workflow' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + fast_test: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RmFzdCB0ZXN0') }} + name: "Fast test" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Fast test" + + - 
name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Fast test' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Fast test' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} + name: "Build (amd_debug)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_debug)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_debug)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact CH_AMD_DEBUG + uses: actions/upload-artifact@v4 + with: + name: CH_AMD_DEBUG + path: ci/tmp/build/programs/self-extracting/clickhouse + + + - name: Upload artifact DEB_AMD_DEBUG + uses: actions/upload-artifact@v4 + with: + name: DEB_AMD_DEBUG + path: ci/tmp/*.deb + + build_amd_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} + name: "Build (amd_asan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_asan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_asan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact UNITTEST_AMD_ASAN + uses: actions/upload-artifact@v4 + with: + name: UNITTEST_AMD_ASAN + path: ci/tmp/build/src/unit_tests_dbms + + + - name: Upload artifact CH_AMD_ASAN + uses: actions/upload-artifact@v4 + with: + name: CH_AMD_ASAN + path: ci/tmp/build/programs/self-extracting/clickhouse + + + - name: Upload artifact DEB_AMD_ASAN + uses: actions/upload-artifact@v4 + with: + name: DEB_AMD_ASAN + path: ci/tmp/*.deb + + build_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} + name: "Build (amd_tsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_tsan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_tsan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact UNITTEST_AMD_TSAN + uses: actions/upload-artifact@v4 + with: + name: UNITTEST_AMD_TSAN + path: ci/tmp/build/src/unit_tests_dbms + + + - name: Upload artifact CH_AMD_TSAN + uses: actions/upload-artifact@v4 + with: + name: CH_AMD_TSAN + path: ci/tmp/build/programs/self-extracting/clickhouse + + + - name: Upload artifact DEB_AMD_TSAN + uses: actions/upload-artifact@v4 + with: + name: DEB_AMD_TSAN + path: ci/tmp/*.deb + + build_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }} + name: "Build (amd_msan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_msan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_msan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_msan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact UNITTEST_AMD_MSAN + uses: actions/upload-artifact@v4 + with: + name: UNITTEST_AMD_MSAN + path: ci/tmp/build/src/unit_tests_dbms + + + - name: Upload artifact CH_AMD_MSAN + uses: actions/upload-artifact@v4 + with: + name: CH_AMD_MSAN + path: ci/tmp/build/programs/self-extracting/clickhouse + + + - name: Upload artifact DEB_AMD_MSAN + uses: actions/upload-artifact@v4 + with: + name: DEB_AMD_MSAN + path: ci/tmp/*.deb + + build_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }} + name: "Build (amd_ubsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_ubsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_ubsan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_ubsan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact UNITTEST_AMD_UBSAN + uses: actions/upload-artifact@v4 + with: + name: UNITTEST_AMD_UBSAN + path: ci/tmp/build/src/unit_tests_dbms + + + - name: Upload artifact CH_AMD_UBSAN + uses: actions/upload-artifact@v4 + with: + name: CH_AMD_UBSAN + path: ci/tmp/build/programs/self-extracting/clickhouse + + + - name: Upload artifact DEB_AMD_UBSAN + uses: actions/upload-artifact@v4 + with: + name: DEB_AMD_UBSAN + path: ci/tmp/*.deb + + build_amd_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} + name: "Build (amd_binary)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_binary)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_binary)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_binary)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact CH_AMD_BINARY + uses: actions/upload-artifact@v4 + with: + name: CH_AMD_BINARY + path: ci/tmp/build/programs/self-extracting/clickhouse + + build_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} + name: "Build (arm_asan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_asan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_asan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_asan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact CH_ARM_ASAN + uses: actions/upload-artifact@v4 + with: + name: CH_ARM_ASAN + path: ci/tmp/build/programs/self-extracting/clickhouse + + + - name: Upload artifact DEB_ARM_ASAN + uses: actions/upload-artifact@v4 + with: + name: DEB_ARM_ASAN + path: ci/tmp/*.deb + + build_arm_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }} + name: "Build (arm_binary)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_binary)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_binary)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_binary)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact CH_ARM_BIN + uses: actions/upload-artifact@v4 + with: + name: CH_ARM_BIN + path: ci/tmp/build/programs/self-extracting/clickhouse + + build_arm_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV90c2FuKQ==') }} + name: "Build (arm_tsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_tsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_tsan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_tsan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact CH_ARM_TSAN + uses: actions/upload-artifact@v4 + with: + name: CH_ARM_TSAN + path: ci/tmp/build/programs/self-extracting/clickhouse + + build_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} + name: "Build (amd_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_release)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_release)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact CH_AMD_RELEASE + uses: actions/upload-artifact@v4 + with: + name: CH_AMD_RELEASE + path: ci/tmp/build/programs/self-extracting/clickhouse + + + - name: Upload artifact CH_AMD_RELEASE_STRIPPED + uses: actions/upload-artifact@v4 + with: + name: CH_AMD_RELEASE_STRIPPED + path: ci/tmp/build/programs/self-extracting/clickhouse-stripped + + + - name: Upload artifact DEB_AMD_RELEASE + uses: actions/upload-artifact@v4 + with: + name: DEB_AMD_RELEASE + path: ci/tmp/*.deb + + + - name: Upload artifact RPM_AMD_RELEASE + uses: actions/upload-artifact@v4 + with: + name: RPM_AMD_RELEASE + path: ci/tmp/*.rpm + + + - name: Upload artifact TGZ_AMD_RELEASE + uses: actions/upload-artifact@v4 + with: + name: TGZ_AMD_RELEASE + path: ci/tmp/*64.tgz* + + build_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} + name: "Build (arm_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup 
+ uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_release)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_release)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + - name: Upload artifact CH_ARM_RELEASE + uses: actions/upload-artifact@v4 + with: + name: CH_ARM_RELEASE + path: ci/tmp/build/programs/self-extracting/clickhouse + + + - name: Upload artifact CH_ARM_RELEASE_STRIPPED + uses: actions/upload-artifact@v4 + with: + name: CH_ARM_RELEASE_STRIPPED + path: ci/tmp/build/programs/self-extracting/clickhouse-stripped + + + - name: Upload artifact DEB_ARM_RELEASE + uses: actions/upload-artifact@v4 + with: + name: DEB_ARM_RELEASE + path: ci/tmp/*.deb + + + - name: Upload artifact RPM_ARM_RELEASE + uses: actions/upload-artifact@v4 + with: + name: RPM_ARM_RELEASE + path: ci/tmp/*.rpm + + + - name: Upload artifact TGZ_ARM_RELEASE + uses: actions/upload-artifact@v4 + with: + name: TGZ_ARM_RELEASE + path: ci/tmp/*64.tgz* + + quick_functional_tests: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, fast_test, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'UXVpY2sgZnVuY3Rpb25hbCB0ZXN0cw==') }} + name: "Quick functional tests" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Quick functional tests" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_DEBUG + uses: actions/download-artifact@v4 + with: + name: CH_AMD_DEBUG + path: ./ci/tmp + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Quick functional tests' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Quick functional tests' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_asan_distributed_plan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_asan_distributed_plan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_asan_db_disk_distributed_plan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGIgZGlzaywgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_asan, db disk, distributed plan, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF 
+ + - name: Download artifact CH_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_asan, db disk, distributed plan, sequential)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p 
./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + 
ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} + 
name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json 
<< 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_debug_asyncinsert_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > 
./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_DEBUG + uses: actions/download-artifact@v4 + with: + name: CH_AMD_DEBUG + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_debug_asyncinsert_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + 
with: + test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_DEBUG + uses: actions/download-artifact@v4 + with: + name: CH_AMD_DEBUG + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_debug_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p 
./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_DEBUG + uses: actions/download-artifact@v4 + with: + name: CH_AMD_DEBUG + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_debug_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + 
mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_DEBUG + uses: actions/download-artifact@v4 + with: + name: CH_AMD_DEBUG + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_tsan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, parallel, 1/2)" + + - name: Prepare 
env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_tsan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, parallel, 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests 
(amd_tsan, parallel, 2/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_tsan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: 
./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 1/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_tsan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, sequential, 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: 
./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, sequential, 2/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_msan_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_msan, parallel, 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ 
env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, parallel, 1/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_MSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_MSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_msan_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_msan, parallel, 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + 
uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, parallel, 2/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_MSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_MSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_msan_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_msan, sequential, 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 
'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, sequential, 1/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_MSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_MSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_msan_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_msan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_msan, sequential, 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + 
pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_msan, sequential, 2/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_MSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_MSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_ubsan_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_ubsan, parallel)" + outputs: + 
data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_UBSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_UBSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_ubsan_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_ubsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_ubsan, 
sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_ubsan, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_UBSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_UBSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_debug_distributed_plan_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 
'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_DEBUG + uses: actions/download-artifact@v4 + with: + name: CH_AMD_DEBUG + path: ./ci/tmp + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_debug_distributed_plan_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }} + name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) 
}} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_DEBUG + uses: actions/download-artifact@v4 + with: + name: CH_AMD_DEBUG + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_tsan_s3_storage_parallel_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDEvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 1/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export 
PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 1/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_tsan_s3_storage_parallel_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwsIDIvMik=') }} + name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, parallel, 2/2)" + + - name: Prepare env script + 
run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel, 2/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_tsan_s3_storage_sequential_1_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: 
./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_tsan_s3_storage_sequential_2_2: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }} + name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: 
actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_arm_binary_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, fast_test, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} + name: "Stateless tests (arm_binary, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: 
./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_ARM_BIN + uses: actions/download-artifact@v4 + with: + name: CH_ARM_BIN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_arm_binary_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (arm_binary, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: 
Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_ARM_BIN + uses: actions/download-artifact@v4 + with: + name: CH_ARM_BIN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_asan_db_disk_old_analyzer_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF 
}} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 1/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 1/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_asan_db_disk_old_analyzer_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 2/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 2/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_asan_db_disk_old_analyzer_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }} + name: "Integration 
tests (amd_asan, db disk, old analyzer, 3/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 3/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 3/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_asan_db_disk_old_analyzer_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && 
!contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 4/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 4/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_asan_db_disk_old_analyzer_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 5/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_ASAN + 
uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 5/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_asan_db_disk_old_analyzer_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBkYiBkaXNrLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }} + name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_asan, db disk, old analyzer, 6/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > 
./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_asan, db disk, old analyzer, 6/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_binary_1_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }} + name: "Integration tests (amd_binary, 1/5)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 1/5)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > 
./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_binary_2_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }} + name: "Integration tests (amd_binary, 2/5)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 2/5)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: 
+ + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_binary_3_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }} + name: "Integration tests (amd_binary, 3/5)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 3/5)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export 
PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_binary_4_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }} + name: "Integration tests (amd_binary, 4/5)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 4/5)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' 
+ export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_binary_5_5: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_binary, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }} + name: "Integration tests (amd_binary, 5/5)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_binary, 5/5)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 
'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_BINARY + uses: actions/download-artifact@v4 + with: + name: CH_AMD_BINARY + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_arm_binary_distributed_plan_1_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 1/4)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 1/4)" + + - name: Prepare env script + run: | + 
rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_ARM_BIN + uses: actions/download-artifact@v4 + with: + name: CH_ARM_BIN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_arm_binary_distributed_plan_2_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 2/4)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + 
test_name: "Integration tests (arm_binary, distributed plan, 2/4)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_ARM_BIN + uses: actions/download-artifact@v4 + with: + name: CH_ARM_BIN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_arm_binary_distributed_plan_3_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 3/4)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + 
uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 3/4)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_ARM_BIN + uses: actions/download-artifact@v4 + with: + name: CH_ARM_BIN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_arm_binary_distributed_plan_4_4: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }} + name: "Integration tests (arm_binary, distributed plan, 4/4)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - 
name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (arm_binary, distributed plan, 4/4)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_ARM_BIN + uses: actions/download-artifact@v4 + with: + name: CH_ARM_BIN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_tsan_1_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }} + name: "Integration tests (amd_tsan, 1/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 1/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_tsan_2_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }} + name: "Integration tests (amd_tsan, 2/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 2/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_tsan_3_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }} + name: "Integration tests (amd_tsan, 3/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 3/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_tsan_4_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }} + name: "Integration tests (amd_tsan, 4/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 4/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_tsan_5_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }} + name: "Integration tests (amd_tsan, 5/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 5/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + integration_tests_amd_tsan_6_6: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_arm_binary, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }} + name: "Integration tests (amd_tsan, 6/6)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ 
steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Integration tests (amd_tsan, 6/6)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: CH_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + unit_tests_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_asan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }} + name: "Unit tests (asan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit 
tests (asan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact UNITTEST_AMD_ASAN + uses: actions/download-artifact@v4 + with: + name: UNITTEST_AMD_ASAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Unit tests (asan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Unit tests (asan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + unit_tests_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_tsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }} + name: "Unit tests (tsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (tsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF 
+ ENV_SETUP_SCRIPT_EOF + + - name: Download artifact UNITTEST_AMD_TSAN + uses: actions/download-artifact@v4 + with: + name: UNITTEST_AMD_TSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Unit tests (tsan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Unit tests (tsan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + unit_tests_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_msan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }} + name: "Unit tests (msan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (msan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact UNITTEST_AMD_MSAN + uses: actions/download-artifact@v4 + with: + name: UNITTEST_AMD_MSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Unit tests (msan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Unit tests (msan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + unit_tests_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_ubsan] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }} + name: "Unit tests (ubsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Unit tests (ubsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact UNITTEST_AMD_UBSAN + uses: actions/download-artifact@v4 + with: + name: UNITTEST_AMD_UBSAN + path: ./ci/tmp + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Unit tests (ubsan)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Unit tests (ubsan)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + install_packages_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} + name: "Install packages (amd_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_AMD_RELEASE + uses: actions/download-artifact@v4 + with: + name: CH_AMD_RELEASE + path: ./ci/tmp + + + - name: Download artifact DEB_AMD_RELEASE + uses: 
actions/download-artifact@v4 + with: + name: DEB_AMD_RELEASE + path: ./ci/tmp + + + - name: Download artifact RPM_AMD_RELEASE + uses: actions/download-artifact@v4 + with: + name: RPM_AMD_RELEASE + path: ./ci/tmp + + + - name: Download artifact TGZ_AMD_RELEASE + uses: actions/download-artifact@v4 + with: + name: TGZ_AMD_RELEASE + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Install packages (amd_release)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Install packages (amd_release)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + install_packages_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} + name: "Install packages (arm_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 
'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact CH_ARM_RELEASE + uses: actions/download-artifact@v4 + with: + name: CH_ARM_RELEASE + path: ./ci/tmp + + + - name: Download artifact DEB_ARM_RELEASE + uses: actions/download-artifact@v4 + with: + name: DEB_ARM_RELEASE + path: ./ci/tmp + + + - name: Download artifact RPM_ARM_RELEASE + uses: actions/download-artifact@v4 + with: + name: RPM_ARM_RELEASE + path: ./ci/tmp + + + - name: Download artifact TGZ_ARM_RELEASE + uses: actions/download-artifact@v4 + with: + name: TGZ_ARM_RELEASE + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Install packages (arm_release)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Install packages (arm_release)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + compatibility_check_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYW1kX3JlbGVhc2Up') }} + name: "Compatibility check (amd_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' 
}} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (amd_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact DEB_AMD_RELEASE + uses: actions/download-artifact@v4 + with: + name: DEB_AMD_RELEASE + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Compatibility check (amd_release)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Compatibility check (amd_release)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi + + compatibility_check_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, fast_test, build_amd_debug, build_amd_asan, build_arm_binary, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYXJtX3JlbGVhc2Up') }} + name: "Compatibility check (arm_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 
'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Compatibility check (arm_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Download artifact DEB_ARM_RELEASE + uses: actions/download-artifact@v4 + with: + name: DEB_ARM_RELEASE + path: ./ci/tmp + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Compatibility check (arm_release)' --workflow "Community PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Compatibility check (arm_release)' --workflow "Community PR" --ci |& tee ./ci/tmp/job.log + fi diff --git a/.github/workflows/regression-reusable-suite.yml b/.github/workflows/regression-reusable-suite.yml new file mode 100644 index 000000000000..5c0fab69c701 --- /dev/null +++ b/.github/workflows/regression-reusable-suite.yml @@ -0,0 +1,193 @@ +name: Regression suite +on: + workflow_call: + inputs: + ref: + description: "Commit SHA to checkout. Default: current (empty string)." 
+ type: string + default: "" + workflow_config: + required: true + type: string + flags: + required: false + type: string + output_format: + required: true + type: string + extra_args: + required: false + type: string + suite_name: + required: true + type: string + suite_executable: + required: false + type: string + default: "regression.py" + timeout_minutes: + required: true + type: number + storage_path: + required: false + type: string + default: "" + regression_args: + required: false + type: string + default: "" + runner_type: + required: false + type: string + default: "" + runner_arch: + required: false + type: string + default: "x86" + job_name: + required: false + type: string + default: "" + part: + required: false + type: string + default: "" + build_sha: + required: false + type: string + default: "" + set_commit_status: + required: false + type: boolean + default: false +jobs: + suite: + name: ${{ format('{0}{1}', inputs.job_name != '' && inputs.job_name || inputs.suite_name, inputs.part != '' && format('_{0}', inputs.part) || '') }} + runs-on: [ + "self-hosted", + "altinity-on-demand", + "${{ inputs.runner_type }}", + ] + timeout-minutes: ${{ inputs.timeout_minutes }} + env: + SUITE: ${{ inputs.suite_name }} + SUITE_EXECUTABLE: ${{ inputs.suite_executable }} + STORAGE: ${{ inputs.storage_path }} + PART: ${{ inputs.part }} + REPORT_JOB_NAME: ${{ format('{0}{1}', inputs.job_name != '' && inputs.job_name || inputs.suite_name, inputs.part != '' && format('_{0}', inputs.part) || '') }} + # AWS credentials + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + # Docker credentials + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + # Database credentials + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + 
CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + # LocalStack token + LOCALSTACK_AUTH_TOKEN: ${{ secrets.LOCALSTACK_AUTH_TOKEN }} + # Python encoding + PYTHONIOENCODING: utf-8 + build_sha: ${{ inputs.build_sha }} + pr_number: ${{ github.event.number }} + artifacts: builds + # Args + args: --test-to-end + --no-colors + --local + --collect-service-logs + --output ${{ inputs.output_format }} + --attr project="${GITHUB_REPOSITORY}" project.id="${GITHUB_REPOSITORY_ID}" user.name="${GITHUB_ACTOR}" version="${{ fromJson(inputs.workflow_config).JOB_KV_DATA.version.string }}" package="$clickhouse_path" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$REPORT_JOB_NAME" job.retry=$GITHUB_RUN_ATTEMPT job.url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}" arch="$(uname -i)" + --cicd + --log raw.log + ${{ inputs.flags != 'none' && inputs.flags || ''}} + ${{ inputs.extra_args }} + artifact_paths: | + ./report.html + ./*.log.txt + ./*.log + ./*.html + ./*/_instances/*.log + ./*/_instances/*/logs/*.log + ./*/*/_instances/*/logs/*.log + ./*/*/_instances/*.log + + steps: + - name: ⤵️ Checkout + uses: actions/checkout@v4 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.ref }} + + - name: ♻️ Cache setup + uses: ./.github/actions/cache-setup + + - name: 🛠️ Setup + run: .github/setup.sh + + - name: 📦 Get deb url + env: + S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/ + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + run: | + REPORTS_PATH=${{ runner.temp }}/reports_dir + mkdir -p $REPORTS_PATH + cat > $REPORTS_PATH/workflow_config.json << 'EOF' + ${{ toJson(fromJson(inputs.workflow_config).WORKFLOW_CONFIG) }} + EOF + + python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ 
inputs.build_sha || github.sha }} --binary + + - name: 🔄 Process regression args + run: | + REGRESSION_ARGS='${{ inputs.regression_args }}' + # AWS replacements + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AWS_BUCKET}}'/${{ secrets.REGRESSION_AWS_S3_BUCKET }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AWS_REGION}}'/${{ secrets.REGRESSION_AWS_S3_REGION }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AWS_KEY_ID}}'/${{ secrets.REGRESSION_AWS_S3_KEY_ID }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AWS_ACCESS_KEY}}'/${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}}" + # GCS replacements + REGRESSION_ARGS="${REGRESSION_ARGS//'{{GCS_URI}}'/${{ secrets.REGRESSION_GCS_URI }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{GCS_KEY_ID}}'/${{ secrets.REGRESSION_GCS_KEY_ID }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{GCS_KEY_SECRET}}'/${{ secrets.REGRESSION_GCS_KEY_SECRET }}}" + # Azure replacements + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AZURE_ACCOUNT_NAME}}'/${{ secrets.AZURE_ACCOUNT_NAME }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AZURE_STORAGE_KEY}}'/${{ secrets.AZURE_STORAGE_KEY }}}" + REGRESSION_ARGS="${REGRESSION_ARGS//'{{AZURE_CONTAINER_NAME}}'/${{ secrets.AZURE_CONTAINER_NAME }}}" + echo "REGRESSION_ARGS=$REGRESSION_ARGS" >> $GITHUB_ENV + + - name: 🧪 Run ${{ env.SUITE }} suite + id: run_suite + run: python3 + -u ${{ env.SUITE }}/${{ env.SUITE_EXECUTABLE }} + --clickhouse ${{ env.clickhouse_path }} + ${{ env.REGRESSION_ARGS }} + ${{ env.args }} || EXITCODE=$?; + .github/add_link_to_logs.sh; + exit $EXITCODE + + - name: 📊 Set Commit Status + if: ${{ !cancelled() && inputs.set_commit_status }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JOB_OUTCOME: ${{ steps.run_suite.outcome }} + SUITE_NAME: ${{ format('Regression {0} {1}{2}', inputs.runner_arch, inputs.job_name != '' && inputs.job_name || inputs.suite_name, inputs.part != '' && format('_{0}', inputs.part) || '') }} + run: python3 .github/set_builds_status.py + + - name: 📝 Create and upload logs + if: ${{ 
!cancelled() }} + run: .github/create_and_upload_logs.sh 1 + + - name: 📤 Upload logs to results database + if: ${{ !cancelled() }} + timeout-minutes: 20 + run: .github/upload_results_to_database.sh 1 + + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: ${{ format('{0}{1}-artifacts-{2}{3}', inputs.job_name != '' && inputs.job_name || inputs.suite_name, inputs.part != '' && format('_{0}', inputs.part) || '', inputs.runner_arch, contains(inputs.extra_args, '--use-keeper') && '_keeper' || '_zookeeper') }} + path: ${{ env.artifact_paths }} + diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml new file mode 100644 index 000000000000..d64ab984d012 --- /dev/null +++ b/.github/workflows/regression.yml @@ -0,0 +1,508 @@ +name: Regression test workflow - Release +'on': + workflow_call: + inputs: + runner_type: + description: the (meta-)label of runner to use + required: true + type: string + commit: + description: commit hash of the regression tests. + required: true + type: string + arch: + description: arch to run the tests on. + required: true + type: string + timeout_minutes: + description: Maximum number of minutes to let workflow run before GitHub cancels it. + default: 210 + type: number + build_sha: + description: commit sha of the workflow run for artifact upload. + required: true + type: string + checkout_depth: + description: the value of the git shallow checkout + required: false + type: number + default: 1 + submodules: + description: if the submodules should be checked out + required: false + type: boolean + default: false + additional_envs: + description: additional ENV variables to setup the job + type: string + workflow_config: + description: workflow config for the run + required: true + type: string + secrets: + secret_envs: + description: if given, it's passed to the environments + required: false + AWS_SECRET_ACCESS_KEY: + description: the access key to the aws param store. 
+ required: true + AWS_ACCESS_KEY_ID: + description: the access key id to the aws param store. + required: true + AWS_DEFAULT_REGION: + description: the region of the aws param store. + required: true + AWS_REPORT_KEY_ID: + description: aws s3 key id used for regression test reports. + required: true + AWS_REPORT_SECRET_ACCESS_KEY: + description: aws s3 secret access key used for regression test reports. + required: true + AWS_REPORT_REGION: + description: aws s3 region used for regression test reports. + required: true + DOCKER_USERNAME: + description: username of the docker user. + required: true + DOCKER_PASSWORD: + description: password to the docker user. + required: true + REGRESSION_AWS_S3_BUCKET: + description: aws s3 bucket used for regression tests. + required: true + REGRESSION_AWS_S3_KEY_ID: + description: aws s3 key id used for regression tests. + required: true + REGRESSION_AWS_S3_SECRET_ACCESS_KEY: + description: aws s3 secret access key used for regression tests. + required: true + REGRESSION_AWS_S3_REGION: + description: aws s3 region used for regression tests. + required: true + REGRESSION_GCS_KEY_ID: + description: gcs key id used for regression tests. + required: true + REGRESSION_GCS_KEY_SECRET: + description: gcs key secret used for regression tests. + required: true + REGRESSION_GCS_URI: + description: gcs uri used for regression tests. 
+ required: true + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + args: --test-to-end + --no-colors + --local + --collect-service-logs + --output new-fails + --parallel 1 + --log raw.log + --with-analyzer + artifact_paths: | + ./report.html + ./*.log.txt + ./*.log + ./*.html + ./*/_instances/*.log + ./*/_instances/*/logs/*.log + ./*/*/_instances/*/logs/*.log + ./*/*/_instances/*.log + +jobs: + Common: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'common') + strategy: + fail-fast: false + matrix: + SUITE: [aes_encryption, atomic_insert, attach, base_58, clickhouse_keeper_failover,data_types, datetime64_extended_range, disk_level_encryption, dns, engines, example, extended_precision_data_types, functions, jwt_authentication, kafka, kerberos, key_value, lightweight_delete, memory, part_moves_between_shards, selects, session_timezone, settings, version, window_functions] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: ${{ matrix.SUITE }} + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: ${{ matrix.SUITE }} + 
secrets: inherit + + AggregateFunctions: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'aggregate_functions') + strategy: + fail-fast: false + matrix: + PART: [1, 2, 3] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: aggregate_functions + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: aggregate_functions + extra_args: --only "part ${{ matrix.PART }}/*" + secrets: inherit + Alter: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'alter') + strategy: + fail-fast: false + matrix: + ONLY: [replace, move] + include: + - ONLY: attach + PART: 1 + - ONLY: attach + PART: 2 + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: alter + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.ONLY }}_partition + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: alter_${{ matrix.ONLY }} + extra_args: --only "/alter/${{ matrix.ONLY }} partition/${{ matrix.PART && format('part {0}/', matrix.PART) || '' }}*" + secrets: inherit + + Benchmark: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + 
contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'benchmark') + strategy: + fail-fast: false + matrix: + STORAGE: [minio, aws_s3, gcs] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: ontime_benchmark + suite_executable: benchmark.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.STORAGE }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: benchmark_${{ matrix.STORAGE }} + regression_args: --storage ${{ matrix.STORAGE }} --gcs-uri {{GCS_URI}} --gcs-key-id {{GCS_KEY_ID}} --gcs-key-secret {{GCS_KEY_SECRET}} --aws-s3-bucket {{AWS_BUCKET}} --aws-s3-region {{AWS_REGION}} --aws-s3-key-id {{AWS_KEY_ID}} --aws-s3-access-key {{AWS_ACCESS_KEY}} + secrets: inherit + + ClickHouseKeeper: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'clickhouse_keeper') + strategy: + fail-fast: false + matrix: + PART: [1, 2] + SSL: [ssl, no_ssl] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: clickhouse_keeper + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.SSL }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: clickhouse_keeper_${{ matrix.SSL }} + extra_args: ${{ matrix.SSL == 'ssl' && '--ssl' || '' }} --only "part ${{ matrix.PART }}/*" + secrets: inherit + + Iceberg: + if: | + 
fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'iceberg') + strategy: + fail-fast: false + matrix: + PART: [1, 2] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: iceberg + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: iceberg + extra_args: --only ${{ matrix.PART == 1 && '"/iceberg/iceberg engine/rest catalog/*" "/iceberg/s3 table function/*" "/iceberg/icebergS3 table function/*" "/iceberg/iceberg cache/*"' || '"/iceberg/iceberg engine/glue catalog/*" "/iceberg/iceberg table engine/*"' }} + secrets: inherit + LDAP: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'ldap') + strategy: + fail-fast: false + matrix: + SUITE: [authentication, external_user_directory, role_mapping] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: ldap/${{ matrix.SUITE }} + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: ldap_${{ matrix.SUITE }} + secrets: inherit + + Parquet: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'parquet') + uses: 
./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: parquet + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: parquet + secrets: inherit + + ParquetS3: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'parquet') + strategy: + fail-fast: false + matrix: + STORAGE: [minio, aws_s3] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: parquet + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: ${{ matrix.STORAGE }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: parquet_${{ matrix.STORAGE }} + regression_args: --storage ${{ matrix.STORAGE }} --aws-s3-bucket {{AWS_BUCKET}} --aws-s3-region {{AWS_REGION}} --aws-s3-key-id {{AWS_KEY_ID}} --aws-s3-access-key {{AWS_ACCESS_KEY}} --only "/parquet/${{ matrix.STORAGE }}/*" + secrets: inherit + + RBAC: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'rbac') + strategy: + fail-fast: false + matrix: + PART: [1, 2, 3] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: rbac + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: 
${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: rbac + extra_args: --only "/rbac/part ${{ matrix.PART }}/*" + secrets: inherit + SSLServer: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'ssl_server') + strategy: + fail-fast: false + matrix: + PART: [1, 2, 3] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: ssl_server + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: ssl_server + extra_args: --only "part ${{ matrix.PART }}/*" + secrets: inherit + + S3: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 's3') + strategy: + fail-fast: false + matrix: + STORAGE: [aws_s3, gcs, azure, minio] + PART: [1, 2] + include: + - STORAGE: minio + PART: 3 + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: s3 + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.STORAGE }} + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: s3_${{ matrix.STORAGE }} + regression_args: --storage ${{ matrix.STORAGE }} 
--gcs-uri {{GCS_URI}} --gcs-key-id {{GCS_KEY_ID}} --gcs-key-secret {{GCS_KEY_SECRET}} --aws-s3-bucket {{AWS_BUCKET}} --aws-s3-region {{AWS_REGION}} --aws-s3-key-id {{AWS_KEY_ID}} --aws-s3-access-key {{AWS_ACCESS_KEY}} --azure-account-name {{AZURE_ACCOUNT_NAME}} --azure-storage-key {{AZURE_STORAGE_KEY}} --azure-container {{AZURE_CONTAINER_NAME}} + extra_args: --only ":/try*" ":/part ${{ matrix.PART }}/*" + secrets: inherit + + S3Export: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 's3_export') + strategy: + fail-fast: false + matrix: + PART: [part, partition] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: s3 + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /minio + part: ${{ matrix.PART }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: s3_export + regression_args: --storage minio + extra_args: --only ":/try*" "minio/export tests/export ${{ matrix.PART }}/*" + secrets: inherit + + Swarms: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'swarms') + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: swarms + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: swarms + secrets: inherit + + 
TieredStorage: + if: | + fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs[0] == null || + contains(fromJson(inputs.workflow_config).JOB_KV_DATA.ci_regression_jobs, 'tiered_storage') + strategy: + fail-fast: false + matrix: + STORAGE: [local, minio, s3amazon, s3gcs] + uses: ./.github/workflows/regression-reusable-suite.yml + with: + ref: ${{ inputs.commit }} + workflow_config: ${{ inputs.workflow_config }} + suite_name: tiered_storage + suite_executable: regression.py + output_format: new-fails + flags: --with-analyzer + timeout_minutes: ${{ inputs.timeout_minutes }} + runner_arch: ${{ inputs.arch }} + runner_type: ${{ inputs.runner_type }} + storage_path: /${{ matrix.STORAGE }} + build_sha: ${{ inputs.build_sha }} + set_commit_status: true + job_name: tiered_storage_${{ matrix.STORAGE }} + regression_args: --aws-s3-access-key {{AWS_ACCESS_KEY}} --aws-s3-key-id {{AWS_KEY_ID}} --aws-s3-uri https://s3.{{AWS_REGION}}.amazonaws.com/{{AWS_BUCKET}}/data/ --gcs-key-id {{GCS_KEY_ID}} --gcs-key-secret {{GCS_KEY_SECRET}} --gcs-uri {{GCS_URI}} + extra_args: ${{ matrix.STORAGE != 'local' && format('--with-{0}', matrix.STORAGE) || '' }} + secrets: inherit diff --git a/.github/workflows/release_builds.yml b/.github/workflows/release_builds.yml new file mode 100644 index 000000000000..cc73fbf821b1 --- /dev/null +++ b/.github/workflows/release_builds.yml @@ -0,0 +1,1390 @@ +# generated by praktika + +name: Release Builds +on: + workflow_dispatch: + inputs: + +env: + PYTHONUNBUFFERED: 1 + GH_TOKEN: ${{ github.token }} + CHECKOUT_REF: "" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} 
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }} + AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }} + AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }} + AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + + +jobs: + + config_workflow: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [] + name: "Config Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Config Workflow" + + - name: Note report location to summary + if: ${{ !failure() && env.AWS_ACCESS_KEY_ID && env.AWS_SECRET_ACCESS_KEY }} + env: + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA" + else + PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA" + fi + REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html + echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Config Workflow' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Config Workflow' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + dockers_build_amd: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }} + name: "Dockers Build (amd)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (amd)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Dockers Build (amd)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Dockers Build (amd)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + dockers_build_arm: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }} + name: "Dockers Build (arm)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (arm)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Dockers Build (arm)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Dockers Build (arm)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + dockers_build_multiplatform_manifest: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VycyBCdWlsZCAobXVsdGlwbGF0Zm9ybSBtYW5pZmVzdCk=') }} + name: "Dockers Build (multiplatform manifest)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Dockers Build (multiplatform manifest)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_debug: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} + name: "Build (amd_debug)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_debug)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_debug)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_debug)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }} + name: "Build (amd_asan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_asan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_asan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_asan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_tsan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }} + name: "Build (amd_tsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_tsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_tsan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_tsan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_msan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }} + name: "Build (amd_msan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_msan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_msan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_msan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_ubsan: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }} + name: "Build (amd_ubsan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_ubsan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_ubsan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_ubsan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }} + name: "Build (amd_binary)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_binary)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_binary)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_binary)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_arm_asan: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} + name: "Build (arm_asan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_asan)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_asan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_asan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_arm_binary: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }} + name: "Build (arm_binary)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_binary)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_binary)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_binary)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} + name: "Build (amd_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (amd_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (amd_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (amd_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + build_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-builder] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} + name: "Build (arm_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Build (arm_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Build (arm_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Build (arm_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + docker_server_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + name: "Docker server image" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker server image" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Docker server image' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Docker server image' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + docker_keeper_image: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + name: "Docker keeper image" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Docker keeper image" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Docker keeper image' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Docker keeper image' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + install_packages_amd_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }} + name: "Install packages (amd_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (amd_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Install packages (amd_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Install packages (amd_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + install_packages_arm_release: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }} + name: "Install packages (arm_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Install packages (arm_release)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Install packages (arm_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Install packages (arm_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }} + name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > 
./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > 
./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ 
toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_arm_binary_parallel: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }} + name: "Stateless tests (arm_binary, parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, parallel)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + 
EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + stateless_tests_arm_binary_sequential: + runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }} + name: "Stateless tests (arm_binary, sequential)" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Stateless tests (arm_binary, sequential)" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > ./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . 
./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + + finish_workflow: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary, build_amd_release, build_arm_release, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential] + if: ${{ always() }} + name: "Finish Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ env.CHECKOUT_REF }} + + - name: Setup + uses: ./.github/actions/runner_setup + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + test_name: "Finish Workflow" + + - name: Prepare env script + run: | + rm -rf ./ci/tmp + mkdir -p ./ci/tmp + cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:.: + cat > ./ci/tmp/workflow_inputs.json << 'EOF' + ${{ toJson(github.event.inputs) }} + EOF + cat > ./ci/tmp/workflow_job.json << 'EOF' + ${{ toJson(job) }} + EOF + cat > 
./ci/tmp/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + - name: Run + id: run + run: | + . ./ci/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run 'Finish Workflow' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log + else + python3 -m praktika run 'Finish Workflow' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log + fi + +########################################################################################## +##################################### ALTINITY JOBS ###################################### +########################################################################################## + + GrypeScanServer: + needs: [config_workflow, docker_server_image] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }} + strategy: + fail-fast: false + matrix: + suffix: ['', '-alpine'] + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-server + version: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + tag-suffix: ${{ matrix.suffix }} + GrypeScanKeeper: + needs: [config_workflow, docker_keeper_image] + if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }} + uses: ./.github/workflows/grype_scan.yml + secrets: inherit + with: + docker_image: altinityinfra/clickhouse-keeper + version: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + + SignRelease: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_sign.yml + secrets: inherit + with: + 
test_name: Sign release + runner_type: altinity-style-checker + data: ${{ needs.config_workflow.outputs.data }} + SignAarch64: + needs: [config_workflow, build_arm_release] + if: ${{ !failure() && !cancelled() }} + uses: ./.github/workflows/reusable_sign.yml + secrets: inherit + with: + test_name: Sign aarch64 + runner_type: altinity-style-checker-aarch64 + data: ${{ needs.config_workflow.outputs.data }} + + FinishCIReport: + if: ${{ !cancelled() }} + needs: + - config_workflow + - dockers_build_amd + - dockers_build_arm + - dockers_build_multiplatform_manifest + - build_amd_debug + - build_amd_asan + - build_amd_tsan + - build_amd_msan + - build_amd_ubsan + - build_amd_binary + - build_arm_asan + - build_arm_binary + - build_amd_release + - build_arm_release + - docker_server_image + - docker_keeper_image + - install_packages_amd_release + - install_packages_arm_release + - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel + - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential + - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel + - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential + - stateless_tests_arm_binary_parallel + - stateless_tests_arm_binary_sequential + - finish_workflow + - GrypeScanServer + - GrypeScanKeeper + - SignRelease + - SignAarch64 + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + - name: Finalize workflow report + if: ${{ !cancelled() }} + uses: ./.github/actions/create_workflow_report + with: + workflow_config: ${{ toJson(needs) }} + final: true + + SourceUpload: + needs: [config_workflow, build_amd_release] + if: ${{ !failure() && !cancelled() }} + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64] + env: + COMMIT_SHA: ${{ github.event_name == 'pull_request' 
&& github.event.pull_request.head.sha || github.sha }} + PR_NUMBER: ${{ github.event.pull_request.number || 0 }} + VERSION: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }} + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + clear-repository: true + ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }} + submodules: true + fetch-depth: 0 + filter: tree:0 + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + - name: Create source tar + run: | + cd .. && tar czf $RUNNER_TEMP/build_source.src.tar.gz ClickHouse/ + - name: Upload source tar + run: | + if [ "$PR_NUMBER" -eq 0 ]; then + S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release" + else + S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release" + fi + + aws s3 cp $RUNNER_TEMP/build_source.src.tar.gz s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz diff --git a/.github/workflows/repo-sanity-checks.yml b/.github/workflows/repo-sanity-checks.yml new file mode 100644 index 000000000000..ec50a056b730 --- /dev/null +++ b/.github/workflows/repo-sanity-checks.yml @@ -0,0 +1,150 @@ +name: Repository Sanity Checks + +on: + workflow_dispatch: # Manual trigger only + + workflow_call: + +jobs: + sanity-checks: + runs-on: [self-hosted, altinity-on-demand, altinity-style-checker] + strategy: + fail-fast: false # Continue with other combinations if one fails + matrix: + include: + # Production packages + - env: prod + type: deb + base: ubuntu:22.04 + repo_url: https://builds.altinity.cloud/apt-repo + - env: prod + type: rpm + base: centos:8 + repo_url: https://builds.altinity.cloud/yum-repo + # FIPS Production packages + - env: prod-fips + type: deb + base: ubuntu:22.04 + repo_url: https://builds.altinity.cloud/fips-apt-repo + - env: prod-fips + type: rpm + base: centos:8 + repo_url: https://builds.altinity.cloud/fips-yum-repo + # Staging 
packages + - env: staging + type: deb + base: ubuntu:22.04 + repo_url: https://builds.staging.altinity.cloud/apt-repo + - env: staging + type: rpm + base: centos:8 + repo_url: https://builds.staging.altinity.cloud/yum-repo + # FIPS Staging packages + - env: staging-fips + type: deb + base: ubuntu:22.04 + repo_url: https://builds.staging.altinity.cloud/fips-apt-repo + - env: staging-fips + type: rpm + base: centos:8 + repo_url: https://builds.staging.altinity.cloud/fips-yum-repo + # Hotfix packages + - env: hotfix + type: deb + base: ubuntu:22.04 + repo_url: https://builds.altinity.cloud/hotfix-apt-repo + - env: hotfix + type: rpm + base: centos:8 + repo_url: https://builds.altinity.cloud/hotfix-yum-repo + # Antalya experimental packages + - env: antalya + type: deb + base: ubuntu:22.04 + repo_url: https://builds.altinity.cloud/antalya-apt-repo + - env: antalya + type: rpm + base: centos:8 + repo_url: https://builds.altinity.cloud/antalya-yum-repo + # Hotfix staging packages + - env: hotfix-staging + type: deb + base: ubuntu:22.04 + repo_url: https://builds.staging.altinity.cloud/hotfix-apt-repo + - env: hotfix-staging + type: rpm + base: centos:8 + repo_url: https://builds.staging.altinity.cloud/hotfix-yum-repo + # Antalya experimental staging packages + - env: antalya-staging + type: deb + base: ubuntu:22.04 + repo_url: https://builds.staging.altinity.cloud/antalya-apt-repo + - env: antalya-staging + type: rpm + base: centos:8 + repo_url: https://builds.staging.altinity.cloud/antalya-yum-repo + + steps: + - name: Run sanity check + run: | + cat << 'EOF' > sanity.sh + #!/bin/bash + set -e -x + + # Package installation commands based on type + if [ "${{ matrix.type }}" = "deb" ]; then + export DEBIAN_FRONTEND=noninteractive + apt-get update && apt-get install -y apt-transport-https ca-certificates curl gnupg2 dialog sudo + mkdir -p /usr/share/keyrings + curl -s "${REPO_URL}/pubkey.gpg" | gpg --dearmor > /usr/share/keyrings/altinity-archive-keyring.gpg + echo "deb 
[signed-by=/usr/share/keyrings/altinity-archive-keyring.gpg] ${REPO_URL} stable main" > /etc/apt/sources.list.d/altinity.list + apt-get update + apt-get install -y clickhouse-server clickhouse-client + else + sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* + yum install -y curl gnupg2 sudo + if [[ "${{ matrix.env }}" == *"staging"* ]]; then + curl "${REPO_URL}/altinity-staging.repo" -o /etc/yum.repos.d/altinity-staging.repo + else + curl "${REPO_URL}/altinity.repo" -o /etc/yum.repos.d/altinity.repo + fi + yum install -y clickhouse-server clickhouse-client + fi + + # Ensure correct ownership + chown -R clickhouse /var/lib/clickhouse/ + chown -R clickhouse /var/log/clickhouse-server/ + + # Check server version + server_version=$(clickhouse-server --version) + echo "$server_version" | grep "altinity" || FAILED_SERVER=true + + # Start server and test + sudo -u clickhouse clickhouse-server --config-file /etc/clickhouse-server/config.xml --daemon + sleep 10 + clickhouse-client -q 'SELECT 1' + + # Check client version + client_version=$(clickhouse-client --version) + echo "$client_version" | grep "altinity" || FAILED_CLIENT=true + + # Report results + if [ "$FAILED_SERVER" = true ]; then + echo "::error::Server check failed - Version: $server_version" + exit 1 + elif [ "$FAILED_CLIENT" = true ]; then + echo "::error::Client check failed - Version: $client_version" + exit 1 + else + echo "All checks passed successfully!" 
+ fi + EOF + + chmod +x sanity.sh + docker run --rm \ + -v $(pwd)/sanity.sh:/sanity.sh \ + -e REPO_URL="${{ matrix.repo_url }}" \ + ${{ matrix.base }} \ + /sanity.sh diff --git a/.github/workflows/reusable_sign.yml b/.github/workflows/reusable_sign.yml new file mode 100644 index 000000000000..7bfed2758359 --- /dev/null +++ b/.github/workflows/reusable_sign.yml @@ -0,0 +1,166 @@ +name: Signing workflow +'on': + workflow_call: + inputs: + test_name: + description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV + required: true + type: string + runner_type: + description: the label of runner to use + required: true + type: string + run_command: + description: the command to launch the check + default: "" + required: false + type: string + checkout_depth: + description: the value of the git shallow checkout + required: false + type: number + default: 1 + submodules: + description: if the submodules should be checked out + required: false + type: boolean + default: false + additional_envs: + description: additional ENV variables to setup the job + type: string + data: + description: ci data + type: string + required: true + working-directory: + description: sets custom working directory + type: string + default: "$GITHUB_WORKSPACE/tests/ci" + secrets: + secret_envs: + description: if given, it's passed to the environments + required: false + AWS_SECRET_ACCESS_KEY: + description: the access key to the aws param store. + required: true + AWS_ACCESS_KEY_ID: + description: the access key id to the aws param store. + required: true + GPG_BINARY_SIGNING_KEY: + description: gpg signing key for packages. + required: true + GPG_BINARY_SIGNING_PASSPHRASE: + description: gpg signing key passphrase. 
+ required: true + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + CHECK_NAME: ${{inputs.test_name}} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }} + CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }} + CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }} + +jobs: + runner_labels_setup: + name: Compute proper runner labels for the rest of the jobs + runs-on: ubuntu-latest + outputs: + runner_labels: ${{ steps.setVariables.outputs.runner_labels }} + steps: + - id: setVariables + name: Prepare runner_labels variables for the later steps + run: | + + # Prepend self-hosted + input="self-hosted, altinity-on-demand, ${input}" + + # Remove all whitespace + input="$(echo ${input} | tr -d [:space:])" + # Make something like a JSON array from comma-separated list + input="[ '${input//\,/\'\, \'}' ]" + + echo "runner_labels=$input" >> ${GITHUB_OUTPUT} + env: + input: ${{ inputs.runner_type }} + + Test: + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }} + env: + GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }} + strategy: + fail-fast: false # we always wait for entire matrix + matrix: + batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }} + steps: + - name: Check out repository code + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 + with: + 
clear-repository: true + ref: ${{ fromJson(inputs.data).git_ref }} + submodules: ${{inputs.submodules}} + fetch-depth: ${{inputs.checkout_depth}} + filter: tree:0 + - name: Set build envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + CHECK_NAME=${{ inputs.test_name }} + ${{inputs.additional_envs}} + ${{secrets.secret_envs}} + DOCKER_TAG< 1 }} + run: | + cat >> "$GITHUB_ENV" << 'EOF' + RUN_BY_HASH_NUM=${{matrix.batch}} + RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }} + EOF + - name: Pre run + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}' + - name: Sign release + env: + GPG_BINARY_SIGNING_KEY: ${{ secrets.GPG_BINARY_SIGNING_KEY }} + GPG_BINARY_SIGNING_PASSPHRASE: ${{ secrets.GPG_BINARY_SIGNING_PASSPHRASE }} + run: | + cd "${{ inputs.working-directory }}" + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \ + --infile ${{ toJson(inputs.data) }} \ + --job-name '${{inputs.test_name}}' \ + --run \ + --force \ + --run-command '''python3 sign_release.py''' + - name: Post run + if: ${{ !cancelled() }} + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}' + - name: Mark as done + if: ${{ !cancelled() }} + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}} + - name: Upload signed hashes + uses: actions/upload-artifact@v4 + with: + name: ${{inputs.test_name}} signed-hashes + path: ${{ env.TEMP_PATH }}/*.gpg + - name: Clean + if: always() + uses: ./.github/actions/clean diff --git a/.github/workflows/scheduled_runs.yml b/.github/workflows/scheduled_runs.yml new file mode 100644 index 000000000000..9069ea7685f2 --- /dev/null +++ b/.github/workflows/scheduled_runs.yml @@ -0,0 +1,55 @@ +name: Scheduled Altinity Stable Builds + +on: + schedule: + - cron: '0 0 * * 6' 
#Weekly run for stable versions + - cron: '0 0 * * *' #Daily run for antalya versions + # Make sure that any changes to this file is actually tested with PRs + pull_request: + types: + - synchronize + - reopened + - opened + paths: + - '**/scheduled_runs.yml' + +jobs: + DailyRuns: + strategy: + fail-fast: false + matrix: + branch: + - antalya + name: ${{ matrix.branch }} + if: github.event.schedule != '0 0 * * 6' + runs-on: ubuntu-latest + steps: + - name: Run ${{ matrix.branch }} workflow + run: | + curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.TOKEN }}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \ + -d '{"ref":"${{ matrix.branch }}"}' + + WeeklyRuns: + strategy: + fail-fast: false + matrix: + branch: + - customizations/24.8.14 + name: ${{ matrix.branch }} + if: github.event.schedule != '0 0 * * *' + runs-on: ubuntu-latest + steps: + - name: Run ${{ matrix.branch }} workflow + run: | + curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.TOKEN }}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \ + -d '{"ref":"${{ matrix.branch }}"}' diff --git a/.github/workflows/sign_and_release.yml b/.github/workflows/sign_and_release.yml new file mode 100644 index 000000000000..f5a48dee97f5 --- /dev/null +++ b/.github/workflows/sign_and_release.yml @@ -0,0 +1,567 @@ +name: Sign and Release packages + +on: + workflow_dispatch: + inputs: + workflow_url: + description: 'The URL to the workflow run that produced the packages' + required: true + release_environment: + description: 'The environment to release to. 
"staging" or "production"' + required: true + default: 'staging' + package_version: + description: 'The version of the package to release' + required: true + type: string + GPG_PASSPHRASE: + description: 'GPG passphrase for signing (required for production releases)' + required: false + type: string + +env: + ARTIFACT_NAME: build_report_package_release + AWS_REGION: us-east-1 + SRC_BUCKET: altinity-build-artifacts + S3_STORAGE_BUCKET: altinity-test-reports + +jobs: + extract-package-info: + runs-on: [altinity-style-checker-aarch64, altinity-on-demand] + outputs: + docker_version: ${{ env.DOCKER_VERSION }}-${{ env.PACKAGE_VERSION }} + commit_hash: ${{ env.COMMIT_HASH }} + folder_time: ${{ env.FOLDER_TIME }} + needs_binary_processing: ${{ env.NEEDS_BINARY_PROCESSING }} + package_version: ${{ env.PACKAGE_VERSION }} + src_dir: ${{ env.SRC_DIR }} + test_results_src: ${{ env.TEST_RESULTS_SRC }} + altinity_build_feature: ${{ env.ALTINITY_BUILD_FEATURE }} + repo_prefix: ${{ env.REPO_PREFIX }} + src_url: ${{ env.SRC_URL }} + dest_url: ${{ env.DEST_URL }} + steps: + - name: Validate inputs + run: | + if [ -z "${{ inputs.workflow_url }}" ]; then + echo "Error: workflow_url is required" + exit 1 + fi + if [ -z "${{ inputs.package_version }}" ]; then + echo "Error: package_version is required" + exit 1 + fi + if [ "${{ inputs.release_environment }}" != "staging" ] && [ "${{ inputs.release_environment }}" != "production" ]; then + echo "Error: release_environment must be either 'staging' or 'production'" + exit 1 + fi + + - name: Download artifact "${{ env.ARTIFACT_NAME }}" + run: | + run_id=$(echo "${{ inputs.workflow_url }}" | grep -oE '[0-9]+$') + + # Get artifact ID + artifact_id=$(curl -s "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts" \ + | jq '.artifacts[] | select(.name == "'"${{ env.ARTIFACT_NAME }}"'") | .id') + + # Download artifact + curl -L -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o "${{ env.ARTIFACT_NAME }}" \ + 
"https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip" + + - name: Unzip Artifact + run: | + unzip -o "${{ env.ARTIFACT_NAME }}" -d "artifact" + + - name: Extract and Parse JSON File + run: | + cd artifact + JSON_FILE=$(ls | grep "build_report.*package_release\.json" | head -n 1) + if [ -z "$JSON_FILE" ]; then + echo "Error: No JSON file matching the pattern was found" + exit 1 + fi + echo "Found JSON file: ${JSON_FILE}" + + # Extract client URL + CLIENT_URL=$(jq -r '.build_urls[] | select(test("clickhouse-client-.*-amd64.tgz$"))' "$JSON_FILE") + if [ -z "$CLIENT_URL" ]; then + echo "Error: No matching client URL found in JSON" + exit 1 + fi + echo "Found client URL: ${CLIENT_URL}" + echo "CLIENT_URL=$CLIENT_URL" >> $GITHUB_ENV + + - name: Extract and Validate Package Information + run: | + # Define regex patterns + PR_REGEX="PRs/([^/]+)/([^/]+)/([^/]+)/clickhouse-client-([^-]+)-amd64.tgz" + NONPR_REGEX="s3.amazonaws.com/([^/]+)/([^/]+)/([^/]+)/([^/]+)/clickhouse-client-([^-]+)-amd64.tgz" + + # Extract information based on URL pattern + if [[ "$CLIENT_URL" =~ $PR_REGEX ]]; then + echo "Matched PR pattern" + PR_NUMBER="${BASH_REMATCH[1]}" + COMMIT_HASH="${BASH_REMATCH[2]}" + PACKAGE_TYPE="${BASH_REMATCH[3]}" + PACKAGE_VERSION="${BASH_REMATCH[4]}" + DOCKER_VERSION="${PR_NUMBER}" + TEST_RESULTS_SRC="${PR_NUMBER}" + SRC_DIR="PRs/${PR_NUMBER}" + elif [[ "$CLIENT_URL" =~ $NONPR_REGEX ]]; then + echo "Matched non-PR pattern" + BRANCH="${BASH_REMATCH[2]}" + COMMIT_HASH="${BASH_REMATCH[3]}" + PACKAGE_TYPE="${BASH_REMATCH[4]}" + PACKAGE_VERSION="${BASH_REMATCH[5]}" + DOCKER_VERSION="0" + TEST_RESULTS_SRC="0" + SRC_DIR="${BRANCH}" + else + echo "Error: The client URL did not match any expected pattern" + exit 1 + fi + + # Verify package version + if [ "$PACKAGE_VERSION" != "${{ inputs.package_version }}" ]; then + echo "Error: Extracted package version ($PACKAGE_VERSION) does not match input package version (${{ inputs.package_version }})" + 
exit 1 + fi + + # Extract major version and determine binary processing need + MAJOR_VERSION=$(echo "$PACKAGE_VERSION" | cut -d. -f1) + NEEDS_BINARY_PROCESSING=$([ "$MAJOR_VERSION" -ge 24 ] && echo "true" || echo "false") + + # Extract feature and set repo prefix + ALTINITY_BUILD_FEATURE=$(echo "$PACKAGE_VERSION" | rev | cut -d. -f1 | rev) + case "$ALTINITY_BUILD_FEATURE" in + "altinityhotfix") REPO_PREFIX="hotfix-" ;; + "altinityfips") REPO_PREFIX="fips-" ;; + "altinityantalya") REPO_PREFIX="antalya-" ;; + "altinitystable"|"altinitytest") REPO_PREFIX="" ;; + *) + echo "Error: Build feature not supported: ${ALTINITY_BUILD_FEATURE}" + exit 1 + ;; + esac + + # Generate folder time + FOLDER_TIME=$(date -u +"%Y-%m-%dT%H-%M-%S.%3N") + + # Set all environment variables at once + { + echo "COMMIT_HASH=${COMMIT_HASH}" + echo "DOCKER_VERSION=${DOCKER_VERSION}" + echo "FOLDER_TIME=${FOLDER_TIME}" + echo "NEEDS_BINARY_PROCESSING=${NEEDS_BINARY_PROCESSING}" + echo "PACKAGE_VERSION=${PACKAGE_VERSION}" + echo "SRC_DIR=${SRC_DIR}" + echo "TEST_RESULTS_SRC=${TEST_RESULTS_SRC}" + echo "ALTINITY_BUILD_FEATURE=${ALTINITY_BUILD_FEATURE}" + echo "REPO_PREFIX=${REPO_PREFIX}" + echo "SRC_URL=s3://${SRC_BUCKET}/${SRC_DIR}/${COMMIT_HASH}" + echo "DEST_URL=s3://${S3_STORAGE_BUCKET}/builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" + } >> $GITHUB_ENV + + - name: Display Extracted Information + run: | + echo "Extracted information:" + echo "altinity_build_feature: ${ALTINITY_BUILD_FEATURE}" + echo "commit_hash: ${COMMIT_HASH}" + echo "docker_version: ${DOCKER_VERSION}" + echo "folder_time: ${FOLDER_TIME}" + echo "needs_binary_processing: ${NEEDS_BINARY_PROCESSING}" + echo "package_version: ${PACKAGE_VERSION}" + echo "repo_prefix: ${REPO_PREFIX}" + echo "src_bucket: ${SRC_BUCKET}" + echo "src_dir: ${SRC_DIR}" + echo "test_results_src: ${TEST_RESULTS_SRC}" + echo "src_url: ${SRC_URL}" + echo "dest_url: ${DEST_URL}" + + - name: Install aws cli + if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }} + 
uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + + - name: Process ARM binary + if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }} + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | + echo "Downloading clickhouse binary..." + if ! aws s3 cp "${SRC_URL}/package_aarch64/clickhouse" clickhouse; then + echo "Failed to download clickhouse binary" + exit 1 + fi + chmod +x clickhouse + + echo "Running clickhouse binary..." + ./clickhouse -q'q' + + echo "Stripping the binary..." + strip clickhouse -o clickhouse-stripped + + echo "Uploading processed binaries..." + if ! aws s3 cp clickhouse "${SRC_URL}/package_aarch64/arm-bin/non-self-extracting/"; then + echo "Failed to upload clickhouse binary" + exit 1 + fi + if ! aws s3 cp clickhouse-stripped "${SRC_URL}/package_aarch64/arm-bin/non-self-extracting/"; then + echo "Failed to upload stripped clickhouse binary" + exit 1 + fi + + copy-packages: + needs: extract-package-info + runs-on: [altinity-func-tester, altinity-on-demand] + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + SRC_URL: ${{ needs.extract-package-info.outputs.src_url }} + DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }} + NEEDS_BINARY_PROCESSING: ${{ needs.extract-package-info.outputs.needs_binary_processing }} + steps: + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: amd64 + + # - name: Download signed hash artifacts + # run: | + # run_id=$(echo "${{ inputs.workflow_url }}" | grep -oE '[0-9]+$') + # mkdir -p signed-hashes/amd64 signed-hashes/arm64 + + # # Download AMD64 hashes + # artifact_id=$(curl -s \ + # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + # -H "Accept: application/vnd.github.v3+json" \ + # "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts?per_page=1000" \ + # | jq 
-r --arg NAME "Sign release signed-hashes" '.artifacts[] | select(.name == $NAME) | .id') + # if [ -z "$artifact_id" ] || [ "$artifact_id" == "null" ]; then + # echo "Error: Could not find artifact 'Sign release signed-hashes' for run $run_id" + # exit 1 + # fi + # if ! curl -L \ + # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + # -H "Accept: application/vnd.github.v3+json" \ + # -o "signed-hashes/amd64/hashes.zip" \ + # "https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip"; then + # echo "Error: Failed to download AMD64 hashes" + # exit 1 + # fi + # unzip -o "signed-hashes/amd64/hashes.zip" -d signed-hashes/amd64 + + # # Download ARM64 hashes + # artifact_id=$(curl -s \ + # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + # -H "Accept: application/vnd.github.v3+json" \ + # "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts?per_page=1000" \ + # | jq -r --arg NAME "Sign aarch64 signed-hashes" '.artifacts[] | select(.name == $NAME) | .id') + # if [ -z "$artifact_id" ] || [ "$artifact_id" == "null" ]; then + # echo "Error: Could not find artifact 'Sign aarch64 signed-hashes' for run $run_id" + # exit 1 + # fi + # if ! curl -L \ + # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + # -H "Accept: application/vnd.github.v3+json" \ + # -o "signed-hashes/arm64/hashes.zip" \ + # "https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip"; then + # echo "Error: Failed to download ARM64 hashes" + # exit 1 + # fi + # unzip -o "signed-hashes/arm64/hashes.zip" -d signed-hashes/arm64 + + # - name: Download packages for verification + # run: | + # # Create temporary directories for downloaded packages + # mkdir -p /tmp/arm_packages /tmp/amd_packages + + # # Download ARM packages + # echo "Downloading ARM packages for verification..." + # if ! 
aws s3 sync "${SRC_URL}/package_aarch64/" /tmp/arm_packages; then + # echo "Failed to download ARM packages" + # exit 1 + # fi + + # # Download AMD packages + # echo "Downloading AMD packages for verification..." + # if ! aws s3 sync "${SRC_URL}/package_release/" /tmp/amd_packages; then + # echo "Failed to download AMD packages" + # exit 1 + # fi + + # - name: Verify ARM packages + # run: | + # cd signed-hashes/arm64 + # # Verify all files + # find /tmp/arm_packages -type f | while read -r file; do + # if [ -f "$file" ]; then + # file_name=$(basename "$file") + # echo "Verifying $file_name..." + + # if ! gpg --verify "$file_name.sha256.gpg" 2>/dev/null; then + # echo "GPG verification failed for $file_name" + # exit 1 + # fi + # if ! sha256sum -c "$file_name.sha256.gpg" 2>/dev/null; then + # echo "SHA256 verification failed for $file_name" + # exit 1 + # fi + # fi + # done + + # - name: Verify AMD packages + # run: | + # cd signed-hashes/amd64 + # # Verify all files + # find /tmp/amd_packages -type f | while read -r file; do + # if [ -f "$file" ]; then + # file_name=$(basename "$file") + # echo "Verifying $file_name..." + + # if ! gpg --verify "$file_name.sha256.gpg" 2>/dev/null; then + # echo "GPG verification failed for $file_name" + # exit 1 + # fi + # if ! sha256sum -c "$file_name.sha256.gpg" 2>/dev/null; then + # echo "SHA256 verification failed for $file_name" + # exit 1 + # fi + # fi + # done + + - name: Move verified packages to destination + run: | + # Move ARM packages + echo "Moving verified ARM packages to destination..." + if ! aws s3 cp "${SRC_URL}/package_aarch64/" "${DEST_URL}/packages/ARM_PACKAGES/" --recursive; then + echo "Failed to move ARM packages to destination" + exit 1 + fi + + # Move AMD packages + echo "Moving verified AMD packages to destination..." + if ! 
aws s3 cp "${SRC_URL}/package_release/" "${DEST_URL}/packages/AMD_PACKAGES/" --recursive; then + echo "Failed to move AMD packages to destination" + exit 1 + fi + + # Clean up temporary directories + rm -rf /tmp/arm_packages /tmp/amd_packages + + - name: Separate ARM binary + run: | + aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse" + aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse-stripped" + + - name: Separate AMD binary + run: | + aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse" + aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse-stripped" + + - name: Process AMD binary + if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }} + run: | + echo "Downloading clickhouse binary..." + if ! aws s3 cp "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse" clickhouse; then + echo "Failed to download clickhouse binary" + exit 1 + fi + chmod +x clickhouse + + echo "Running clickhouse binary..." + ./clickhouse -q'q' + + echo "Stripping the binary..." + strip clickhouse -o clickhouse-stripped + + echo "Uploading processed binaries..." + if ! aws s3 cp clickhouse "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/non-self-extracting/"; then + echo "Failed to upload clickhouse binary" + exit 1 + fi + if ! 
aws s3 cp clickhouse-stripped "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/non-self-extracting/"; then + echo "Failed to upload stripped clickhouse binary" + exit 1 + fi + + copy-test-results: + needs: extract-package-info + runs-on: [altinity-style-checker-aarch64, altinity-on-demand] + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + TEST_RESULTS_SRC: ${{ needs.extract-package-info.outputs.test_results_src }} + COMMIT_HASH: ${{ needs.extract-package-info.outputs.commit_hash }} + DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }} + steps: + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + - name: Copy test results to S3 + run: | + # Copy test results + echo "Copying test results..." + if ! aws s3 sync "s3://${SRC_BUCKET}/${TEST_RESULTS_SRC}/${COMMIT_HASH}" \ + "${DEST_URL}/test_results/"; then + echo "Failed to copy test results" + exit 1 + fi + + # publish-docker: + # needs: extract-package-info + # strategy: + # matrix: + # image_type: [server, keeper] + # variant: ['', '-alpine'] + # uses: ./.github/workflows/docker_publish.yml + # with: + # docker_image: altinityinfra/clickhouse-${{ matrix.image_type }}:${{ needs.extract-package-info.outputs.docker_version }}${{ matrix.variant }} + # release_environment: ${{ inputs.release_environment }} + # upload_artifacts: false + # s3_upload_path: "${{ needs.extract-package-info.outputs.dest_url }}/docker_images/${{ matrix.image_type }}${{ matrix.variant }}/" + # secrets: inherit + + sign-and-publish: + needs: [extract-package-info, copy-packages] + runs-on: arc-runners-clickhouse-signer + env: + GPG_PASSPHRASE: ${{ inputs.release_environment == 'production' && inputs.GPG_PASSPHRASE || secrets.GPG_PASSPHRASE }} + REPO_DNS_NAME: ${{ inputs.release_environment == 'production' && 'builds.altinity.cloud' || 'builds.staging.altinity.cloud' }} + REPO_NAME: ${{ inputs.release_environment == 
'production' && 'altinity' || 'altinity-staging' }} + REPO_SUBTITLE: ${{ inputs.release_environment == 'production' && 'Stable Builds' || 'Staging Builds' }} + PACKAGE_VERSION: ${{ needs.extract-package-info.outputs.package_version }} + FOLDER_TIME: ${{ needs.extract-package-info.outputs.folder_time }} + REPO_PREFIX: ${{ needs.extract-package-info.outputs.repo_prefix }} + NEEDS_BINARY_PROCESSING: ${{ needs.extract-package-info.outputs.needs_binary_processing }} + DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }} + RELEASE_ENVIRONMENT: ${{ inputs.release_environment }} + steps: + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + + - name: Checkout repository + uses: actions/checkout@v4 + with: + repository: Altinity/ClickHouse + ref: antalya + path: ClickHouse + + - name: Download packages + run: | + if ! aws s3 cp "${DEST_URL}/packages/ARM_PACKAGES/" /home/runner/.cache/tmp/packages --recursive; then + echo "Failed to download ARM packages" + exit 1 + fi + if ! aws s3 cp "${DEST_URL}/packages/AMD_PACKAGES/" /home/runner/.cache/tmp/packages --recursive; then + echo "Failed to download AMD packages" + exit 1 + fi + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + + - name: Setup GPG + run: | + if [ -z "${GPG_PASSPHRASE}" ] + then + echo "GPG_PASSPHRASE is not set" + exit 1 + fi + + - name: Process GPG key + run: | + echo "Processing GPG key..." + if ! aws secretsmanager get-secret-value --secret-id arn:aws:secretsmanager:us-east-1:446527654354:secret:altinity_staging_gpg-Rqbe8S --query SecretString --output text | sed -e "s/^'//" -e "s/'$//" | jq -r '.altinity_staging_gpg | @base64d' | gpg --batch --import; then + echo "Failed to import GPG key" + exit 1 + fi + gpg --list-secret-keys --with-keygrip + gpgconf --kill gpg-agent + gpg-agent --daemon --allow-preset-passphrase + if ! 
aws ssm get-parameter --name /gitlab-runner/key-encrypting-key --with-decryption --query Parameter.Value --output text | sudo tee /root/.key-encrypting-key >/dev/null; then + echo "Failed to get key encrypting key" + exit 1 + fi + GPG_KEY_NAME=$(gpg --list-secret-keys | grep uid | head --lines 1 | tr -s " " | cut -d " " -f 4-) + GPG_KEY_ID=$(gpg --list-secret-keys --with-keygrip "${GPG_KEY_NAME}" | grep Keygrip | head --lines 1 | tr -s " " | cut -d " " -f 4) + # Persist the key identifiers for later steps: shell variables do not survive + # across GitHub Actions steps, and "Run Ansible playbook" reads both of these. + echo "GPG_KEY_NAME=${GPG_KEY_NAME}" >> $GITHUB_ENV + echo "GPG_KEY_ID=${GPG_KEY_ID}" >> $GITHUB_ENV + echo "$GPG_PASSPHRASE" | base64 -d | sudo openssl enc -d -aes-256-cbc -pbkdf2 -pass file:/root/.key-encrypting-key -in - -out - | /usr/lib/gnupg/gpg-preset-passphrase --preset $GPG_KEY_ID + + - name: Run Ansible playbook + run: | + echo "Running Ansible playbook for signing and publishing..." + echo "ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml -e aws_region=$AWS_REGION -e gpg_key_id=\"$GPG_KEY_ID\" -e gpg_key_name=\"$GPG_KEY_NAME\" -e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" -e pkgver=\"${PACKAGE_VERSION}\" -e release_environment=$RELEASE_ENVIRONMENT -e repo_dns_name=$REPO_DNS_NAME -e repo_name=$REPO_NAME -e repo_prefix=\"$REPO_PREFIX\" -e repo_subtitle=\"$REPO_SUBTITLE\" -e s3_pkgs_bucket=$S3_STORAGE_BUCKET -e s3_pkgs_path=\"builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}\" -e repo_path=\"/home/runner/.cache/${{ inputs.release_environment }}\" ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml " + if ! 
ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml \ + -e aws_region=$AWS_REGION \ + -e gpg_key_id="$GPG_KEY_ID" \ + -e gpg_key_name="$GPG_KEY_NAME" \ + -e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" \ + -e pkgver="${PACKAGE_VERSION}" \ + -e release_environment=$RELEASE_ENVIRONMENT \ + -e repo_dns_name=$REPO_DNS_NAME \ + -e repo_name=$REPO_NAME \ + -e repo_prefix="$REPO_PREFIX" \ + -e repo_subtitle="$REPO_SUBTITLE" \ + -e s3_pkgs_bucket=$S3_STORAGE_BUCKET \ + -e s3_pkgs_path="builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" \ + ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml; then + echo "Ansible playbook failed" + exit 1 + fi + gpgconf --kill gpg-agent + ls -hal + + - name: Cleanup temporary files + if: always() + run: | + echo "Cleaning up temporary files..." + rm -rf /home/runner/.cache/tmp/packages || true + + repo-sanity-check: + needs: sign-and-publish + uses: Altinity/ClickHouse/.github/workflows/repo-sanity-checks.yml@antalya + + copy-to-released: + needs: [extract-package-info, sign-and-publish] + if: ${{ inputs.release_environment == 'production' }} + runs-on: [altinity-style-checker-aarch64, altinity-on-demand] + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + PACKAGE_VERSION: ${{ needs.extract-package-info.outputs.package_version }} + DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }} + steps: + - name: Install aws cli + uses: unfor19/install-aws-cli-action@v1 + with: + version: 2 + arch: arm64 + + - name: Copy to released directory + run: | + echo "Copying to released directory..." + echo "Source: ${DEST_URL}/" + echo "Destination: s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/" + + if ! 
aws s3 sync "${DEST_URL}/" "s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/" --no-progress; then + echo "Failed to copy to released directory" + exit 1 + fi + + echo "Verifying copy operation..." + if ! aws s3 ls "s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/" | grep -q "packages"; then + echo "Error: Packages directory not found in destination" + exit 1 + fi diff --git a/ci/defs/defs.py b/ci/defs/defs.py index fcd6bc93ee06..d4fcec09ffe5 100644 --- a/ci/defs/defs.py +++ b/ci/defs/defs.py @@ -1,38 +1,56 @@ from praktika import Artifact, Docker, Secret from praktika.utils import MetaClasses, Utils +from settings import altinity_overrides # i.e. "ClickHouse/ci/tmp" TEMP_DIR = f"{Utils.cwd()}/ci/tmp" # == _Settings.TEMP_DIR != env_helper.TEMP_PATH -SYNC = "CH Inc sync" +SYNC = "Altinity sync" -S3_BUCKET_NAME = "clickhouse-builds" -S3_REPORT_BUCKET_NAME = "clickhouse-test-reports" -S3_BUCKET_HTTP_ENDPOINT = "clickhouse-builds.s3.amazonaws.com" -S3_REPORT_BUCKET_HTTP_ENDPOINT = "s3.amazonaws.com/clickhouse-test-reports" +S3_BUCKET_NAME = altinity_overrides.S3_BUCKET_NAME +S3_REPORT_BUCKET_NAME = altinity_overrides.S3_REPORT_BUCKET_NAME +S3_BUCKET_HTTP_ENDPOINT = altinity_overrides.S3_BUCKET_HTTP_ENDPOINT +S3_REPORT_BUCKET_HTTP_ENDPOINT = altinity_overrides.S3_REPORT_BUCKET_HTTP_ENDPOINT class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" - FUNC_TESTER_AMD = ["self-hosted", "amd-medium"] - FUNC_TESTER_ARM = ["self-hosted", "arm-medium"] - AMD_LARGE = ["self-hosted", "amd-large"] - ARM_LARGE = ["self-hosted", "arm-large"] - AMD_MEDIUM = ["self-hosted", "amd-medium"] - ARM_MEDIUM = ["self-hosted", "arm-medium"] - AMD_MEDIUM_CPU = ["self-hosted", "amd-medium-cpu"] - ARM_MEDIUM_CPU = ["self-hosted", "arm-medium-cpu"] - AMD_MEDIUM_MEM = ["self-hosted", "amd-medium-mem"] - ARM_MEDIUM_MEM = ["self-hosted", "arm-medium-mem"] - AMD_SMALL = ["self-hosted", "amd-small"] - ARM_SMALL = ["self-hosted", "arm-small"] - 
AMD_SMALL_MEM = ["self-hosted", "amd-small-mem"] - ARM_SMALL_MEM = ["self-hosted", "arm-small-mem"] + BUILDER_AMD = ["self-hosted", "altinity-on-demand", "altinity-builder"] + BUILDER_ARM = ["self-hosted", "altinity-on-demand", "altinity-builder"] + FUNC_TESTER_AMD = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + FUNC_TESTER_ARM = [ + "self-hosted", + "altinity-on-demand", + "altinity-func-tester-aarch64", + ] + AMD_LARGE = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + ARM_LARGE = ["self-hosted", "altinity-on-demand", "altinity-func-tester-aarch64"] + AMD_MEDIUM = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + ARM_MEDIUM = ["self-hosted", "altinity-on-demand", "altinity-func-tester-aarch64"] + AMD_MEDIUM_CPU = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + ARM_MEDIUM_CPU = [ + "self-hosted", + "altinity-on-demand", + "altinity-func-tester-aarch64", + ] + AMD_MEDIUM_MEM = ["self-hosted", "altinity-on-demand", "altinity-func-tester"] + ARM_MEDIUM_MEM = [ + "self-hosted", + "altinity-on-demand", + "altinity-func-tester-aarch64", + ] + AMD_SMALL = ["self-hosted", "altinity-on-demand", "altinity-style-checker"] + ARM_SMALL = ["self-hosted", "altinity-on-demand", "altinity-style-checker-aarch64"] + AMD_SMALL_MEM = ["self-hosted", "altinity-on-demand", "altinity-style-checker"] MACOS_ARM_SMALL = ["self-hosted", "arm_macos_small"] MACOS_AMD_SMALL = ["self-hosted", "amd_macos_m1"] - STYLE_CHECK_AMD = ["self-hosted", "style-checker"] - STYLE_CHECK_ARM = ["self-hosted", "style-checker-aarch64"] + STYLE_CHECK_AMD = ["self-hosted", "altinity-on-demand", "altinity-style-checker"] + STYLE_CHECK_ARM = [ + "self-hosted", + "altinity-on-demand", + "altinity-style-checker-aarch64", + ] class CIFiles: @@ -40,7 +58,7 @@ class CIFiles: UNIT_TESTS_BIN = f"{TEMP_DIR}/build/src/unit_tests_dbms" -BASE_BRANCH = "master" +BASE_BRANCH = altinity_overrides.MAIN_BRANCH azure_secret = Secret.Config( 
name="azure_connection_string", @@ -55,237 +73,242 @@ class CIFiles: SECRETS = [ Secret.Config( - name="dockerhub_robot_password", - type=Secret.Type.AWS_SSM_PARAMETER, + name=altinity_overrides.DOCKERHUB_SECRET, + type=Secret.Type.GH_SECRET, ), Secret.Config( - name="clickhouse-test-stat-url", - type=Secret.Type.AWS_SSM_PARAMETER, - region="us-east-1", + name=altinity_overrides.SECRET_CI_DB_URL, + type=Secret.Type.GH_SECRET, ), Secret.Config( - name="clickhouse-test-stat-login", - type=Secret.Type.AWS_SSM_PARAMETER, - region="us-east-1", + name=altinity_overrides.SECRET_CI_DB_USER, + type=Secret.Type.GH_SECRET, ), Secret.Config( - name="clickhouse-test-stat-password", - type=Secret.Type.AWS_SSM_PARAMETER, - region="us-east-1", + name=altinity_overrides.SECRET_CI_DB_PASSWORD, + type=Secret.Type.GH_SECRET, ), - azure_secret, + # azure_secret, chcache_secret, + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-id", + # type=Secret.Type.AWS_SSM_SECRET, + # ), + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-key", + # type=Secret.Type.AWS_SSM_SECRET, + # ), Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-id", - type=Secret.Type.AWS_SSM_SECRET, + name="AWS_ACCESS_KEY_ID", + type=Secret.Type.GH_SECRET, ), Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-key", - type=Secret.Type.AWS_SSM_SECRET, + name="AWS_SECRET_ACCESS_KEY", + type=Secret.Type.GH_SECRET, ), ] DOCKERS = [ Docker.Config( - name="clickhouse/style-test", + name="altinityinfra/style-test", path="./ci/docker/style-test", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/fasttest", + name="altinityinfra/fasttest", path="./ci/docker/fasttest", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/binary-builder", + name="altinityinfra/binary-builder", path="./ci/docker/binary-builder", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/fasttest"], + depends_on=["altinityinfra/fasttest"], ), 
Docker.Config( - name="clickhouse/stateless-test", + name="altinityinfra/stateless-test", path="./ci/docker/stateless-test", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/cctools", + name="altinityinfra/cctools", path="./ci/docker/cctools", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/fasttest"], + depends_on=["altinityinfra/fasttest"], ), Docker.Config( - name="clickhouse/test-base", + name="altinityinfra/test-base", path="./ci/docker/test-base", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/stress-test", + name="altinityinfra/stress-test", path="./ci/docker/stress-test", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/stateless-test"], + depends_on=["altinityinfra/stateless-test"], ), Docker.Config( - name="clickhouse/fuzzer", + name="altinityinfra/fuzzer", path="./ci/docker/fuzzer", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/performance-comparison", + name="altinityinfra/performance-comparison", path="./ci/docker/performance-comparison", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/keeper-jepsen-test", + name="altinityinfra/keeper-jepsen-test", path="./ci/docker/keeper-jepsen-test", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/server-jepsen-test", + name="altinityinfra/server-jepsen-test", path="./ci/docker/server-jepsen-test", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/integration-test", + name="altinityinfra/integration-test", 
path="./ci/docker/integration/base", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/integration-tests-runner", + name="altinityinfra/integration-tests-runner", path="./ci/docker/integration/runner", platforms=Docker.Platforms.arm_amd, - depends_on=["clickhouse/test-base"], + depends_on=["altinityinfra/test-base"], ), Docker.Config( - name="clickhouse/integration-test-with-unity-catalog", + name="altinityinfra/integration-test-with-unity-catalog", path="./ci/docker/integration/clickhouse_with_unity_catalog", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/integration-test-with-hms", + name="altinityinfra/integration-test-with-hms", path="./ci/docker/integration/clickhouse_with_hms_catalog", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/integration-helper", + name="altinityinfra/integration-helper", path="./ci/docker/integration/helper_container", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/kerberos-kdc", + name="altinityinfra/kerberos-kdc", path="./ci/docker/integration/kerberos_kdc", platforms=[Docker.Platforms.AMD], depends_on=[], ), Docker.Config( - name="clickhouse/test-mysql80", + name="altinityinfra/test-mysql80", path="./ci/docker/integration/mysql80", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/test-mysql57", + name="altinityinfra/test-mysql57", path="./ci/docker/integration/mysql57", platforms=Docker.Platforms.AMD, depends_on=[], ), Docker.Config( - name="clickhouse/mysql-golang-client", + name="altinityinfra/mysql-golang-client", path="./ci/docker/integration/mysql_golang_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/mysql-java-client", + name="altinityinfra/mysql-java-client", path="./ci/docker/integration/mysql_java_client", 
platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/mysql-js-client", + name="altinityinfra/mysql-js-client", path="./ci/docker/integration/mysql_js_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/arrowflight-server-test", + name="altinityinfra/arrowflight-server-test", path="./ci/docker/integration/arrowflight", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/dotnet-client", + name="altinityinfra/dotnet-client", path="./ci/docker/integration/dotnet_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/mysql-php-client", + name="altinityinfra/mysql-php-client", path="./ci/docker/integration/mysql_php_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/nginx-dav", + name="altinityinfra/nginx-dav", path="./ci/docker/integration/nginx_dav", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/postgresql-java-client", + name="altinityinfra/postgresql-java-client", path="./ci/docker/integration/postgresql_java_client", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/python-bottle", + name="altinityinfra/python-bottle", path="./ci/docker/integration/resolver", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/s3-proxy", + name="altinityinfra/s3-proxy", path="./ci/docker/integration/s3_proxy", platforms=Docker.Platforms.arm_amd, depends_on=[], ), + # Docker.Config( + # name="clickhouse/docs-builder", + # path="./ci/docker/docs-builder", + # platforms=Docker.Platforms.arm_amd, + # depends_on=[], + # ), Docker.Config( - name="clickhouse/docs-builder", - path="./ci/docker/docs-builder", - platforms=Docker.Platforms.arm_amd, - depends_on=[], - ), - Docker.Config( - name="clickhouse/install-deb-test", + name="altinityinfra/install-deb-test", 
path="./ci/docker/install/deb", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/install-rpm-test", + name="altinityinfra/install-rpm-test", path="./ci/docker/install/rpm", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/sqlancer-test", + name="altinityinfra/sqlancer-test", path="./ci/docker/sqlancer-test", platforms=Docker.Platforms.arm_amd, depends_on=[], ), Docker.Config( - name="clickhouse/mysql_dotnet_client", + name="altinityinfra/mysql_dotnet_client", path="./ci/docker/integration/mysql_dotnet_client", platforms=Docker.Platforms.arm_amd, depends_on=[], @@ -379,12 +402,14 @@ class ArtifactNames: LLVM_COVERAGE_FILE = "LLVM_COVERAGE_FILE" # .profdata file LLVM_COVERAGE_INFO_FILE = "LLVM_COVERAGE_INFO_FILE" # .info file generated from .profdata, used for debugging coverage results CH_AMD_RELEASE = "CH_AMD_RELEASE" + CH_AMD_RELEASE_STRIPPED = "CH_AMD_RELEASE_STRIPPED" CH_AMD_ASAN = "CH_AMD_ASAN" CH_AMD_TSAN = "CH_AMD_TSAN" CH_AMD_MSAN = "CH_AMD_MSAN" CH_AMD_UBSAN = "CH_AMD_UBSAN" CH_AMD_BINARY = "CH_AMD_BINARY" CH_ARM_RELEASE = "CH_ARM_RELEASE" + CH_ARM_RELEASE_STRIPPED = "CH_ARM_RELEASE_STRIPPED" CH_ARM_ASAN = "CH_ARM_ASAN" CH_ARM_TSAN = "CH_ARM_TSAN" @@ -465,12 +490,14 @@ class ArtifactNames: BINARIES_WITH_LONG_RETENTION = [ ArtifactNames.CH_AMD_DEBUG, ArtifactNames.CH_AMD_RELEASE, + ArtifactNames.CH_AMD_RELEASE_STRIPPED, ArtifactNames.CH_AMD_ASAN, ArtifactNames.CH_AMD_TSAN, ArtifactNames.CH_AMD_MSAN, ArtifactNames.CH_AMD_UBSAN, ArtifactNames.CH_AMD_BINARY, ArtifactNames.CH_ARM_RELEASE, + ArtifactNames.CH_ARM_RELEASE_STRIPPED, ArtifactNames.CH_ARM_ASAN, ArtifactNames.CH_ARM_TSAN, ] @@ -509,6 +536,16 @@ class ArtifactConfigs: ArtifactNames.CH_LOONGARCH64, ] ) + clickhouse_stripped_binaries = Artifact.Config( + name="...", + type=Artifact.Type.S3, + path=f"{TEMP_DIR}/build/programs/self-extracting/clickhouse-stripped", + ).parametrize( + names=[ + ArtifactNames.CH_AMD_RELEASE_STRIPPED, 
+ ArtifactNames.CH_ARM_RELEASE_STRIPPED, + ] + ) llvm_profdata_file = Artifact.Config( name="...", type=Artifact.Type.S3, diff --git a/ci/defs/job_configs.py b/ci/defs/job_configs.py index 47dcbd960d05..fc0bd8fc1ef2 100644 --- a/ci/defs/job_configs.py +++ b/ci/defs/job_configs.py @@ -13,11 +13,15 @@ LIMITED_MEM = Utils.physical_memory() - 2 * 1024**3 +# NOTE (strtgbb): We use ZRAM, so it's okay to use more memory than is physically available +LIMITED_MEM = LIMITED_MEM * 2 + BINARY_DOCKER_COMMAND = ( - "clickhouse/binary-builder+--network=host" + "altinityinfra/binary-builder+--network=host" f"+--memory={Utils.physical_memory() * 95 // 100}" f"+--memory-reservation={Utils.physical_memory() * 9 // 10}" f"+--volume=.:/ClickHouse" + '+--env=AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID"+--env=AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY"' ) if Utils.is_arm(): @@ -37,7 +41,7 @@ "./programs", "./rust", "./ci/jobs/build_clickhouse.py", - "./ci/jobs/scripts/job_hooks/build_profile_hook.py", + # "./ci/jobs/scripts/job_hooks/build_profile_hook.py", "./utils/list-licenses", "./utils/self-extracting-executable", ], @@ -80,7 +84,7 @@ # some tests can be flaky due to very slow disks - use tmpfs for temporary ClickHouse files # --cap-add=SYS_PTRACE and --privileged for gdb in docker # --root/--privileged/--cgroupns=host is required for clickhouse-test --memory-limit - run_in_docker=f"clickhouse/stateless-test+--memory={LIMITED_MEM}+--cgroupns=host+--cap-add=SYS_PTRACE+--privileged+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse:mode=1777+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/etc/clickhouse-server1:/etc/clickhouse-server1+--volume=./ci/tmp/etc/clickhouse-server2:/etc/clickhouse-server2+--volume=./ci/tmp/var/log:/var/log+root", + 
run_in_docker=f"altinityinfra/stateless-test+--memory={LIMITED_MEM}+--cgroupns=host+--cap-add=SYS_PTRACE+--privileged+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse:mode=1777+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/etc/clickhouse-server1:/etc/clickhouse-server1+--volume=./ci/tmp/etc/clickhouse-server2:/etc/clickhouse-server2+--volume=./ci/tmp/var/log:/var/log+root+--env=AZURE_STORAGE_KEY=$AZURE_STORAGE_KEY+--env=AZURE_ACCOUNT_NAME=$AZURE_ACCOUNT_NAME+--env=AZURE_CONTAINER_NAME=$AZURE_CONTAINER_NAME+--env=AZURE_STORAGE_ACCOUNT_URL=$AZURE_STORAGE_ACCOUNT_URL+--env=CLICKHOUSE_TEST_STAT_URL=$CLICKHOUSE_TEST_STAT_URL+--env=CLICKHOUSE_TEST_STAT_LOGIN=$CLICKHOUSE_TEST_STAT_LOGIN+--env=CLICKHOUSE_TEST_STAT_PASSWORD=$CLICKHOUSE_TEST_STAT_PASSWORD", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_tests.py", @@ -93,17 +97,18 @@ "./tests/config", "./tests/*.txt", "./ci/docker/stateless-test", + "./tests/broken_tests.yaml", ], ), result_name_for_cidb="Tests", - timeout=int(3600 * 2.5), + timeout=int(3600 * 3.5), ) common_unit_test_job_config = Job.Config( name=JobNames.UNITTEST, runs_on=[], # from parametrize() command=f"python3 ./ci/jobs/unit_tests_job.py", - run_in_docker="clickhouse/fasttest+--privileged", + run_in_docker="altinityinfra/fasttest+--privileged", digest_config=Job.CacheDigestConfig( include_paths=["./ci/jobs/unit_tests_job.py"], ), @@ -143,7 +148,7 @@ "./ci/jobs/scripts/docker_in_docker.sh", ], ), - run_in_docker=f"clickhouse/integration-tests-runner+root+--memory={LIMITED_MEM}+--privileged+--dns-search='.'+--security-opt seccomp=unconfined+--cap-add=SYS_PTRACE+{docker_sock_mount}+--volume=clickhouse_integration_tests_volume:/var/lib/docker+--cgroupns=host", + 
run_in_docker=f"altinityinfra/integration-tests-runner+root+--memory={LIMITED_MEM}+--privileged+--dns-search='.'+--security-opt seccomp=unconfined+--cap-add=SYS_PTRACE+{docker_sock_mount}+--volume=clickhouse_integration_tests_volume:/var/lib/docker+--cgroupns=host+--env=CLICKHOUSE_TEST_STAT_URL=$CLICKHOUSE_TEST_STAT_URL+--env=CLICKHOUSE_TEST_STAT_LOGIN=$CLICKHOUSE_TEST_STAT_LOGIN+--env=CLICKHOUSE_TEST_STAT_PASSWORD=$CLICKHOUSE_TEST_STAT_PASSWORD", ) @@ -152,7 +157,7 @@ class JobConfigs: name=JobNames.STYLE_CHECK, runs_on=RunnerLabels.STYLE_CHECK_ARM, command="python3 ./ci/jobs/check_style.py", - run_in_docker="clickhouse/style-test", + run_in_docker="altinityinfra/style-test", enable_commit_status=True, ) pr_body = Job.Config( @@ -162,26 +167,26 @@ class JobConfigs: allow_merge_on_failure=True, enable_gh_auth=True, ) - code_review = Job.Config( - name=JobNames.CODE_REVIEW, - runs_on=RunnerLabels.STYLE_CHECK_ARM, - command="python3 ./ci/jobs/copilot_review_job.py --pre", - allow_merge_on_failure=True, - enable_gh_auth=True, - ) - ci_results_review = Job.Config( - name=JobNames.CI_RESULTS_REVIEW, - runs_on=RunnerLabels.STYLE_CHECK_ARM, - command="python3 ./ci/jobs/copilot_review_job.py --post", - allow_merge_on_failure=True, - enable_gh_auth=True, - ) + #code_review = Job.Config( + # name=JobNames.CODE_REVIEW, + # runs_on=RunnerLabels.STYLE_CHECK_ARM, + # command="python3 ./ci/jobs/copilot_review_job.py --pre", + # allow_merge_on_failure=True, + # enable_gh_auth=True, + #) + #ci_results_review = Job.Config( + # name=JobNames.CI_RESULTS_REVIEW, + # runs_on=RunnerLabels.STYLE_CHECK_ARM, + # command="python3 ./ci/jobs/copilot_review_job.py --post", + # allow_merge_on_failure=True, + # enable_gh_auth=True, + #) fast_test = Job.Config( name=JobNames.FAST_TEST, runs_on=RunnerLabels.AMD_LARGE, command="python3 ./ci/jobs/fast_test.py", # --network=host required for ec2 metadata http endpoint to work - 
run_in_docker="clickhouse/fasttest+--network=host+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/var/log:/var/log+--volume=.:/ClickHouse", + run_in_docker="altinityinfra/fasttest+--network=host+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/var/log:/var/log+--volume=.:/ClickHouse+--env=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID+--env=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY", digest_config=fast_test_digest_config, result_name_for_cidb="Tests", ) @@ -227,7 +232,7 @@ class JobConfigs: build_jobs = common_build_job_config.set_post_hooks( post_hooks=[ "python3 ./ci/jobs/scripts/job_hooks/build_master_head_hook.py", - "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", + # "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", ], ).parametrize( Job.ParamSet( @@ -299,7 +304,7 @@ class JobConfigs: provides=[ ArtifactNames.CH_COV_BIN, ], - runs_on=RunnerLabels.ARM_LARGE, + runs_on=RunnerLabels.BUILDER_AMD, ), ) release_build_jobs = common_build_job_config.set_post_hooks( @@ -312,22 +317,24 @@ class JobConfigs: parameter=BuildTypes.AMD_RELEASE, provides=[ ArtifactNames.CH_AMD_RELEASE, + ArtifactNames.CH_AMD_RELEASE_STRIPPED, ArtifactNames.DEB_AMD_RELEASE, ArtifactNames.RPM_AMD_RELEASE, ArtifactNames.TGZ_AMD_RELEASE, ], - runs_on=RunnerLabels.ARM_LARGE, + runs_on=RunnerLabels.BUILDER_AMD, timeout=3 * 3600, ), Job.ParamSet( parameter=BuildTypes.ARM_RELEASE, provides=[ ArtifactNames.CH_ARM_RELEASE, + ArtifactNames.CH_ARM_RELEASE_STRIPPED, ArtifactNames.DEB_ARM_RELEASE, ArtifactNames.RPM_ARM_RELEASE, ArtifactNames.TGZ_ARM_RELEASE, ], - runs_on=RunnerLabels.ARM_LARGE, + runs_on=RunnerLabels.BUILDER_AMD, ), ) extra_validation_build_jobs = 
common_build_job_config.set_post_hooks( @@ -421,7 +428,7 @@ class JobConfigs: ).parametrize( Job.ParamSet( parameter="amd_release", - runs_on=RunnerLabels.STYLE_CHECK_AMD, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ ArtifactNames.DEB_AMD_RELEASE, ArtifactNames.CH_AMD_RELEASE, @@ -431,7 +438,7 @@ class JobConfigs: ), Job.ParamSet( parameter="arm_release", - runs_on=RunnerLabels.STYLE_CHECK_ARM, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ ArtifactNames.DEB_ARM_RELEASE, ArtifactNames.CH_ARM_RELEASE, @@ -454,7 +461,7 @@ class JobConfigs: ).parametrize( Job.ParamSet( parameter="amd_release", - runs_on=RunnerLabels.STYLE_CHECK_AMD, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ ArtifactNames.DEB_AMD_RELEASE, ArtifactNames.RPM_AMD_RELEASE, @@ -464,7 +471,7 @@ class JobConfigs: ), Job.ParamSet( parameter="arm_release", - runs_on=RunnerLabels.STYLE_CHECK_ARM, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ ArtifactNames.DEB_ARM_RELEASE, ArtifactNames.RPM_ARM_RELEASE, @@ -518,7 +525,7 @@ class JobConfigs: runs_on=RunnerLabels.FUNC_TESTER_ARM, command="python3 ./ci/jobs/functional_tests.py --options BugfixValidation", # some tests can be flaky due to very slow disks - use tmpfs for temporary ClickHouse files - run_in_docker="clickhouse/stateless-test+--network=host+--privileged+--cgroupns=host+root+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse:mode=1777", + run_in_docker="altinityinfra/stateless-test+--network=host+--privileged+--cgroupns=host+root+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse:mode=1777", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_tests.py", @@ -554,59 +561,59 @@ class JobConfigs: ], Job.ParamSet( parameter="amd_asan, db disk, distributed plan, sequential", - runs_on=RunnerLabels.AMD_SMALL_MEM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_ASAN], ), - Job.ParamSet( - parameter="amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, 
parallel", - runs_on=RunnerLabels.AMD_MEDIUM, # large machine - no boost, why? - requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], - provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_old_s3_db_repl_wasm_parallel"], - ), - Job.ParamSet( - parameter="amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, sequential", - runs_on=RunnerLabels.AMD_SMALL, - requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], - provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_old_s3_db_repl_wasm_sequential"], - ), - Job.ParamSet( - parameter="amd_llvm_coverage, ParallelReplicas, s3 storage, parallel", - runs_on=RunnerLabels.AMD_MEDIUM, # large machine - no boost, why? - requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], - provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_s3_parallel"], - ), - Job.ParamSet( - parameter="amd_llvm_coverage, ParallelReplicas, s3 storage, sequential", - runs_on=RunnerLabels.AMD_SMALL, - requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], - provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_s3_sequential"], - ), - Job.ParamSet( - parameter="amd_llvm_coverage, AsyncInsert, s3 storage, parallel", - runs_on=RunnerLabels.AMD_MEDIUM, # large machine - no boost, why? - requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], - provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_s3_async_parallel"], - ), - Job.ParamSet( - parameter="amd_llvm_coverage, AsyncInsert, s3 storage, sequential", - runs_on=RunnerLabels.AMD_SMALL, - requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], - provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_s3_async_sequential"], - ), + # Job.ParamSet( # NOTE (strtgbb): llvm cov jobs not configured yet. Determine if useful first. + # parameter="amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, parallel", + # runs_on=RunnerLabels.FUNC_TESTER_AMD, # large machine - no boost, why? 
+ # requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], + # provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_old_s3_db_repl_wasm_parallel"], + # ), + # Job.ParamSet( + # parameter="amd_llvm_coverage, old analyzer, s3 storage, DatabaseReplicated, WasmEdge, sequential", + # runs_on=RunnerLabels.FUNC_TESTER_AMD, + # requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], + # provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_old_s3_db_repl_wasm_sequential"], + # ), + # Job.ParamSet( + # parameter="amd_llvm_coverage, ParallelReplicas, s3 storage, parallel", + # runs_on=RunnerLabels.FUNC_TESTER_AMD, # large machine - no boost, why? + # requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], + # provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_s3_parallel"], + # ), + # Job.ParamSet( + # parameter="amd_llvm_coverage, ParallelReplicas, s3 storage, sequential", + # runs_on=RunnerLabels.FUNC_TESTER_AMD, + # requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], + # provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_s3_sequential"], + # ), + # Job.ParamSet( + # parameter="amd_llvm_coverage, AsyncInsert, s3 storage, parallel", + # runs_on=RunnerLabels.FUNC_TESTER_AMD, # large machine - no boost, why? 
+ # requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], + # provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_s3_async_parallel"], + # ), + # Job.ParamSet( + # parameter="amd_llvm_coverage, AsyncInsert, s3 storage, sequential", + # runs_on=RunnerLabels.FUNC_TESTER_AMD, + # requires=[ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD], + # provides=[ArtifactNames.LLVM_COVERAGE_FILE + f"_ft_s3_async_sequential"], + # ), Job.ParamSet( parameter="amd_debug, parallel", - runs_on=RunnerLabels.AMD_MEDIUM_CPU, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_DEBUG], ), Job.ParamSet( parameter="amd_debug, sequential", - runs_on=RunnerLabels.AMD_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_DEBUG], ), *[ Job.ParamSet( parameter=f"amd_tsan, parallel, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_LARGE, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_TSAN], ) for total_batches in (2,) @@ -615,7 +622,7 @@ class JobConfigs: *[ Job.ParamSet( parameter=f"amd_tsan, sequential, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_TSAN], ) for total_batches in (2,) @@ -624,7 +631,7 @@ class JobConfigs: *[ Job.ParamSet( parameter=f"amd_msan, WasmEdge, parallel, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_LARGE, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_MSAN], ) for total_batches in (2,) @@ -633,7 +640,7 @@ class JobConfigs: *[ Job.ParamSet( parameter=f"amd_msan, WasmEdge, sequential, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_SMALL_MEM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_MSAN], ) for total_batches in (2,) @@ -641,28 +648,28 @@ class JobConfigs: ], Job.ParamSet( parameter="amd_ubsan, parallel", - runs_on=RunnerLabels.AMD_SMALL_MEM, # it runs much faster than many job, no need larger machine + runs_on=RunnerLabels.FUNC_TESTER_AMD, # it runs much faster 
than many job, no need larger machine requires=[ArtifactNames.CH_AMD_UBSAN], ), Job.ParamSet( parameter="amd_ubsan, sequential", - runs_on=RunnerLabels.AMD_SMALL_MEM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_UBSAN], ), Job.ParamSet( parameter="amd_debug, distributed plan, s3 storage, parallel", - runs_on=RunnerLabels.AMD_MEDIUM, # large machine - no boost, why? + runs_on=RunnerLabels.FUNC_TESTER_AMD, # large machine - no boost, why? requires=[ArtifactNames.CH_AMD_DEBUG], ), Job.ParamSet( parameter="amd_debug, distributed plan, s3 storage, sequential", - runs_on=RunnerLabels.AMD_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_DEBUG], ), *[ Job.ParamSet( parameter=f"amd_tsan, s3 storage, parallel, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_MEDIUM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_TSAN], ) for total_batches in (2,) @@ -671,7 +678,7 @@ class JobConfigs: *[ Job.ParamSet( parameter=f"amd_tsan, s3 storage, sequential, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_SMALL_MEM, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_AMD_TSAN], ) for total_batches in (2,) @@ -679,12 +686,12 @@ class JobConfigs: ], Job.ParamSet( parameter="arm_binary, parallel", - runs_on=RunnerLabels.ARM_MEDIUM_CPU, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_BINARY], ), Job.ParamSet( parameter="arm_binary, sequential", - runs_on=RunnerLabels.ARM_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_BINARY], ), ) @@ -692,7 +699,7 @@ class JobConfigs: *[ Job.ParamSet( parameter=f"{BuildTypes.AMD_COVERAGE}, {batch}/{total_batches}", - runs_on=RunnerLabels.AMD_SMALL, + runs_on=RunnerLabels.FUNC_TESTER_AMD, requires=[ArtifactNames.CH_COV_BIN], ) for total_batches in (8,) @@ -704,18 +711,18 @@ class JobConfigs: ).parametrize( Job.ParamSet( parameter="arm_asan, azure, parallel", - runs_on=RunnerLabels.ARM_MEDIUM, + 
runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_ASAN], ), Job.ParamSet( parameter="arm_asan, azure, sequential", - runs_on=RunnerLabels.ARM_SMALL_MEM, + runs_on=RunnerLabels.FUNC_TESTER_ARM, requires=[ArtifactNames.CH_ARM_ASAN], ), ) bugfix_validation_it_job = ( common_integration_test_job_config.set_name(JobNames.BUGFIX_VALIDATE_IT) - .set_runs_on(RunnerLabels.AMD_SMALL_MEM) + .set_runs_on(RunnerLabels.FUNC_TESTER_AMD) .set_command( "python3 ./ci/jobs/integration_test_job.py --options BugfixValidation" ) @@ -887,8 +894,8 @@ class JobConfigs: build_llvm_coverage_job = common_build_job_config.set_post_hooks( post_hooks=[ - "python3 ./ci/jobs/scripts/job_hooks/build_master_head_hook.py", - "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", + # "python3 ./ci/jobs/scripts/job_hooks/build_master_head_hook.py", + # "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py", ], ).parametrize( Job.ParamSet( @@ -1083,7 +1090,7 @@ class JobConfigs: runs_on=["#from param"], command='python3 ./ci/jobs/performance_tests.py --test-options "{PARAMETER}"', # TODO: switch to stateless-test image - run_in_docker="clickhouse/performance-comparison", + run_in_docker="altinityinfra/performance-comparison", digest_config=Job.CacheDigestConfig( include_paths=[ "./tests/performance/", @@ -1119,7 +1126,7 @@ class JobConfigs: runs_on=["#from param"], command='python3 ./ci/jobs/performance_tests.py --test-options "{PARAMETER}"', # TODO: switch to stateless-test image - run_in_docker="clickhouse/performance-comparison", + run_in_docker="altinityinfra/performance-comparison", digest_config=Job.CacheDigestConfig( include_paths=[ "./tests/performance/", @@ -1152,7 +1159,7 @@ class JobConfigs: "./ci/jobs/scripts/functional_tests/setup_log_cluster.sh", ], ), - run_in_docker="clickhouse/stateless-test+--shm-size=16g+--network=host", + run_in_docker="altinityinfra/stateless-test+--shm-size=16g+--network=host", ).parametrize( Job.ParamSet( 
parameter=BuildTypes.AMD_RELEASE, @@ -1178,7 +1185,7 @@ class JobConfigs: "./src/Functions", ], ), - run_in_docker="clickhouse/docs-builder", + run_in_docker="altinityinfra/docs-builder", requires=[JobNames.STYLE_CHECK, ArtifactNames.CH_ARM_BINARY], ) docs_job_mintlify = Job.Config( @@ -1194,17 +1201,19 @@ class JobConfigs: "./changelogs/" ], ), - run_in_docker="clickhouse/docs-builder" + run_in_docker="altinityinfra/docs-builder" ) docker_server = Job.Config( name=JobNames.DOCKER_SERVER, runs_on=RunnerLabels.STYLE_CHECK_AMD, - command="python3 ./ci/jobs/docker_server.py --tag-type head --allow-build-reuse", + command="python3 ./ci/jobs/docker_server.py --tag-type head --allow-build-reuse --push", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/docker_server.py", "./docker/server", "./docker/keeper", + ".github/grype", + ".github/workflows/grype_scan.yml", ], ), requires=["Build (amd_release)", "Build (arm_release)"], @@ -1214,12 +1223,14 @@ class JobConfigs: docker_keeper = Job.Config( name=JobNames.DOCKER_KEEPER, runs_on=RunnerLabels.STYLE_CHECK_AMD, - command="python3 ./ci/jobs/docker_server.py --tag-type head --allow-build-reuse", + command="python3 ./ci/jobs/docker_server.py --tag-type head --allow-build-reuse --push", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/docker_server.py", "./docker/server", "./docker/keeper", + ".github/grype", + ".github/workflows/grype_scan.yml", ], ), requires=["Build (amd_release)", "Build (arm_release)"], @@ -1233,7 +1244,7 @@ class JobConfigs: digest_config=Job.CacheDigestConfig( include_paths=["./ci/jobs/sqlancer_job.sh", "./ci/docker/sqlancer-test"], ), - run_in_docker="clickhouse/sqlancer-test", + run_in_docker="altinityinfra/sqlancer-test", timeout=3600, ).parametrize( Job.ParamSet( @@ -1252,7 +1263,7 @@ class JobConfigs: ], ), requires=[ArtifactNames.CH_ARM_RELEASE], - run_in_docker="clickhouse/stateless-test", + run_in_docker="altinityinfra/stateless-test", timeout=10800, ) 
sqllogic_test_master_job = Job.Config( @@ -1266,7 +1277,7 @@ class JobConfigs: ], ), requires=[ArtifactNames.CH_ARM_RELEASE], - run_in_docker="clickhouse/stateless-test", + run_in_docker="altinityinfra/stateless-test", timeout=10800, ) jepsen_keeper = Job.Config( @@ -1317,13 +1328,13 @@ class JobConfigs: vector_search_stress_job = Job.Config( name="Vector Search Stress", runs_on=RunnerLabels.ARM_MEDIUM, - run_in_docker="clickhouse/performance-comparison", + run_in_docker="altinityinfra/performance-comparison", command="python3 ./ci/jobs/vector_search_stress_tests.py", ) llvm_coverage_job = Job.Config( name=JobNames.LLVM_COVERAGE, runs_on=RunnerLabels.AMD_SMALL, - run_in_docker="clickhouse/test-base", + run_in_docker="altinityinfra/test-base", requires=[ ArtifactNames.CH_AMD_LLVM_COVERAGE_BUILD, ArtifactNames.UNITTEST_LLVM_COVERAGE, diff --git a/ci/docker/binary-builder/Dockerfile b/ci/docker/binary-builder/Dockerfile index ed373c47d252..115a4bd58475 100644 --- a/ci/docker/binary-builder/Dockerfile +++ b/ci/docker/binary-builder/Dockerfile @@ -1,6 +1,6 @@ -# docker build -t clickhouse/binary-builder . +# docker build -t altinityinfra/binary-builder . 
ARG FROM_TAG -FROM clickhouse/fasttest:$FROM_TAG +FROM altinityinfra/fasttest:$FROM_TAG ARG TARGETARCH ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} @@ -8,10 +8,10 @@ ENV CXX=clang++-${LLVM_VERSION} #non-functional change # If the cctools is updated, then first build it in the CI, then update here in a different commit -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /cctools /cctools +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b /cctools /cctools # We need OpenSSL FIPS in permissive mode for build on MasterCI -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ @@ -23,6 +23,9 @@ RUN apt-get update \ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \ && apt-get update \ + && apt-get remove --yes clang-tools-${LLVM_VERSION} || true \ + && apt-get install --yes \ + clang-tools-${LLVM_VERSION} \ && apt-get install --yes \ binutils-dev \ build-essential \ diff --git a/ci/docker/cctools/Dockerfile b/ci/docker/cctools/Dockerfile index 3a77e61187ab..b3bfab7a805c 100644 --- a/ci/docker/cctools/Dockerfile +++ b/ci/docker/cctools/Dockerfile @@ -1,10 +1,10 @@ -# docker build -t clickhouse/cctools . +# docker build -t altinityinfra/cctools . 
# This is a hack to significantly reduce the build time of the clickhouse/binary-builder # It's based on the assumption that we don't care of the cctools version so much # It even does not depend on the clickhouse/fasttest in the `docker/images.json` ARG FROM_TAG=latest -FROM clickhouse/fasttest:$FROM_TAG AS builder +FROM altinityinfra/fasttest:$FROM_TAG AS builder ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} diff --git a/ci/docker/fasttest/Dockerfile b/ci/docker/fasttest/Dockerfile index d0c7f30ee314..48412b092395 100644 --- a/ci/docker/fasttest/Dockerfile +++ b/ci/docker/fasttest/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/fasttest . +# docker build -t altinityinfra/fasttest . FROM ubuntu:22.04 # ARG for quick switch to a given ubuntu mirror @@ -26,7 +26,7 @@ RUN apt-get update \ time \ awscli \ --yes --no-install-recommends --verbose-versions \ - && export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \ + && export LLVM_PUBKEY_HASH="5ffc7c9a9299ce774f81cada703e23ebba5bdfb0345b6c3b667b3ead7aa21c75ef62ccd74f7a8f2aa0cbe158d3068bbe" \ && wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \ && echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \ && apt-key add /tmp/llvm-snapshot.gpg.key \ @@ -110,7 +110,7 @@ RUN ARCH=$(uname -m) && \ rustup target add riscv64gc-unknown-linux-gnu # Note, libmpfr6 is also a requirement for gdb -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b /opt/gdb /opt/gdb # Give suid to gdb to grant it attach permissions RUN chmod u+s /opt/gdb/bin/gdb ENV PATH="/opt/gdb/bin:${PATH}" diff --git a/ci/docker/fuzzer/Dockerfile b/ci/docker/fuzzer/Dockerfile index b316220a808e..cf694a57a0de 100644 --- a/ci/docker/fuzzer/Dockerfile +++ b/ci/docker/fuzzer/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 -# docker build -t 
clickhouse/fuzzer . +# docker build -t altinityinfra/fuzzer . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ENV LANG=C.UTF-8 @@ -30,4 +30,4 @@ CMD set -o pipefail \ && cd /workspace \ && timeout --verbose --signal 9 1h /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log -# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer +# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> altinityinfra/fuzzer diff --git a/ci/docker/integration/arrowflight/Dockerfile b/ci/docker/integration/arrowflight/Dockerfile index 2732f366f83b..ed5d8546efa2 100644 --- a/ci/docker/integration/arrowflight/Dockerfile +++ b/ci/docker/integration/arrowflight/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/arrowflight-server-test . +# docker build -t altinityinfra/arrowflight-server-test . # Rebuild to enable SBOM/provenance attestations (see #97903, #98511) FROM python:3.9-slim diff --git a/ci/docker/integration/base/Dockerfile b/ci/docker/integration/base/Dockerfile index 94d08ba819a3..d1638df54915 100644 --- a/ci/docker/integration/base/Dockerfile +++ b/ci/docker/integration/base/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 -# docker build -t clickhouse/integration-test . +# docker build -t altinityinfra/integration-test . 
ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG SHELL ["/bin/bash", "-c"] @@ -71,10 +71,10 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \ ENV TZ=Etc/UTC RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile b/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile index 40d107d0c28b..9337e28926a2 100644 --- a/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile +++ b/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/integration-test-with-hms . +# docker build -t altinityinfra/integration-test-with-hms . ARG FROM_TAG=latest FROM openjdk:8-jre-slim AS build diff --git a/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile b/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile index 50da25ddc78e..855746c23200 100644 --- a/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile +++ b/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/integration-test-with-unity-catalog . +# docker build -t altinityinfra/integration-test-with-unity-catalog . 
ARG FROM_TAG=latest FROM clickhouse/integration-test:$FROM_TAG diff --git a/ci/docker/integration/helper_container/Dockerfile b/ci/docker/integration/helper_container/Dockerfile index 1084d087e53b..81d658705836 100644 --- a/ci/docker/integration/helper_container/Dockerfile +++ b/ci/docker/integration/helper_container/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/integration-helper . +# docker build -t altinityinfra/integration-helper . # Helper docker container to run iptables without sudo FROM alpine:3.18 diff --git a/ci/docker/integration/kerberos_kdc/Dockerfile b/ci/docker/integration/kerberos_kdc/Dockerfile index a203c33a3313..a7f989bf4a56 100644 --- a/ci/docker/integration/kerberos_kdc/Dockerfile +++ b/ci/docker/integration/kerberos_kdc/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/kerberos-kdc . +# docker build -t altinityinfra/kerberos-kdc . FROM centos:6 RUN sed -i '/^mirrorlist/s/^/#/;/^#baseurl/{s/#//;s/mirror.centos.org\/centos\/$releasever/vault.centos.org\/6.10/}' /etc/yum.repos.d/*B* diff --git a/ci/docker/integration/mysql57/Dockerfile b/ci/docker/integration/mysql57/Dockerfile index 105841a57319..dd49afb28a9b 100644 --- a/ci/docker/integration/mysql57/Dockerfile +++ b/ci/docker/integration/mysql57/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/test-mysql57 . +# docker build -t altinityinfra/test-mysql57 . FROM mysql:5.7 diff --git a/ci/docker/integration/mysql80/Dockerfile b/ci/docker/integration/mysql80/Dockerfile index 523ecd787dcc..167a85ab5475 100644 --- a/ci/docker/integration/mysql80/Dockerfile +++ b/ci/docker/integration/mysql80/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/test-mysql80 . +# docker build -t altinityinfra/test-mysql80 . 
FROM ubuntu/mysql:8.0-22.04_edge diff --git a/ci/docker/integration/mysql_dotnet_client/Dockerfile b/ci/docker/integration/mysql_dotnet_client/Dockerfile index 10f8c0d70f8a..a576c3598a03 100644 --- a/ci/docker/integration/mysql_dotnet_client/Dockerfile +++ b/ci/docker/integration/mysql_dotnet_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql_dotnet_client . +# docker build -t altinityinfra/mysql_dotnet_client . # Rebuild to enable SBOM/provenance attestations (see #97903, #98511) FROM ubuntu:22.04 diff --git a/ci/docker/integration/mysql_golang_client/Dockerfile b/ci/docker/integration/mysql_golang_client/Dockerfile index e70fefc67267..2975ba798c1d 100644 --- a/ci/docker/integration/mysql_golang_client/Dockerfile +++ b/ci/docker/integration/mysql_golang_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql-golang-client . +# docker build -t altinityinfra/mysql-golang-client . # MySQL golang client docker container # Rebuild to enable SBOM/provenance attestations (see #97903, #98511) diff --git a/ci/docker/integration/mysql_java_client/Dockerfile b/ci/docker/integration/mysql_java_client/Dockerfile index c4d60bece83d..373e78bc0584 100644 --- a/ci/docker/integration/mysql_java_client/Dockerfile +++ b/ci/docker/integration/mysql_java_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql-java-client . +# docker build -t altinityinfra/mysql-java-client . # MySQL Java client docker container # Rebuild to enable SBOM/provenance attestations (see #97903, #98511) diff --git a/ci/docker/integration/mysql_js_client/Dockerfile b/ci/docker/integration/mysql_js_client/Dockerfile index a15dd566244e..7146bbf1d142 100644 --- a/ci/docker/integration/mysql_js_client/Dockerfile +++ b/ci/docker/integration/mysql_js_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql-js-client . +# docker build -t altinityinfra/mysql-js-client . 
# MySQL JavaScript client docker container # Rebuild to enable SBOM/provenance attestations (see #97903, #98511) @@ -6,6 +6,7 @@ FROM node:22-alpine WORKDIR /usr/app -RUN npm install -g npm@11 && npm install mysql +# NOTE (strtgbb): removed npm upgrade step to avoid a dependency issue +RUN npm install mysql COPY ./test.js ./test.js diff --git a/ci/docker/integration/mysql_php_client/Dockerfile b/ci/docker/integration/mysql_php_client/Dockerfile index c5d1d036846b..4eb670dfe80f 100644 --- a/ci/docker/integration/mysql_php_client/Dockerfile +++ b/ci/docker/integration/mysql_php_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/mysql-php-client . +# docker build -t altinityinfra/mysql-php-client . # MySQL PHP client docker container # Rebuild to enable SBOM/provenance attestations (see #97903, #98511) diff --git a/ci/docker/integration/postgresql_java_client/Dockerfile b/ci/docker/integration/postgresql_java_client/Dockerfile index 50e46c71df2e..bb3e5ef4458e 100644 --- a/ci/docker/integration/postgresql_java_client/Dockerfile +++ b/ci/docker/integration/postgresql_java_client/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/postgresql-java-client . +# docker build -t altinityinfra/postgresql-java-client . # PostgreSQL Java client docker container # Rebuild to enable SBOM/provenance attestations (see #97903, #98511) diff --git a/ci/docker/integration/resolver/Dockerfile b/ci/docker/integration/resolver/Dockerfile index 423faf835ae1..1f639bb2793d 100644 --- a/ci/docker/integration/resolver/Dockerfile +++ b/ci/docker/integration/resolver/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/python-bottle . +# docker build -t altinityinfra/python-bottle . 
# Helper docker container to run python bottle apps # python cgi module is dropped in 3.13 - pin to 3.12 diff --git a/ci/docker/integration/runner/Dockerfile b/ci/docker/integration/runner/Dockerfile index a1b7bbe1eae7..6b2c679699cb 100644 --- a/ci/docker/integration/runner/Dockerfile +++ b/ci/docker/integration/runner/Dockerfile @@ -1,6 +1,6 @@ -# docker build -t clickhouse/integration-tests-runner . +# docker build -t altinityinfra/integration-tests-runner . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG # ARG for quick switch to a given ubuntu mirror ARG apt_archive="http://archive.ubuntu.com" @@ -94,10 +94,10 @@ RUN set -x \ COPY modprobe.sh /usr/local/bin/modprobe COPY misc/ /misc/ -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/docker/integration/s3_proxy/Dockerfile b/ci/docker/integration/s3_proxy/Dockerfile index 5858218e4e4c..df8d8f00f216 100644 --- a/ci/docker/integration/s3_proxy/Dockerfile +++ b/ci/docker/integration/s3_proxy/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/s3-proxy . +# docker build -t altinityinfra/s3-proxy . FROM nginx:alpine COPY run.sh /run.sh diff --git a/ci/docker/keeper-jepsen-test/Dockerfile b/ci/docker/keeper-jepsen-test/Dockerfile index 3c5d0a6ecb42..6633d81193d5 100644 --- a/ci/docker/keeper-jepsen-test/Dockerfile +++ b/ci/docker/keeper-jepsen-test/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/keeper-jepsen-test . 
ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ENV DEBIAN_FRONTEND=noninteractive ENV CLOJURE_VERSION=1.10.3.814 diff --git a/ci/docker/libfuzzer/Dockerfile b/ci/docker/libfuzzer/Dockerfile index fbd6d86808dd..aaa7e2bc2c5d 100644 --- a/ci/docker/libfuzzer/Dockerfile +++ b/ci/docker/libfuzzer/Dockerfile @@ -1,6 +1,6 @@ # docker build -t clickhouse/libfuzzer . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG # ARG for quick switch to a given ubuntu mirror ARG apt_archive="http://archive.ubuntu.com" diff --git a/ci/docker/performance-comparison/Dockerfile b/ci/docker/performance-comparison/Dockerfile index df86c23e26b6..53303392d9ab 100644 --- a/ci/docker/performance-comparison/Dockerfile +++ b/ci/docker/performance-comparison/Dockerfile @@ -1,7 +1,7 @@ -# docker build -t clickhouse/performance-comparison . +# docker build -t altinityinfra/performance-comparison . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG # Remove problematic Kitware repository RUN rm -f /etc/apt/sources.list.d/*kitware* || true @@ -40,7 +40,7 @@ COPY requirements.txt / RUN python3.13 -m pip install --no-cache-dir -r requirements.txt -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" # aws cli to acquire secrets and params from ssm diff --git a/ci/docker/server-jepsen-test/Dockerfile b/ci/docker/server-jepsen-test/Dockerfile index fd70fc457020..54a4626e2892 100644 --- a/ci/docker/server-jepsen-test/Dockerfile +++ b/ci/docker/server-jepsen-test/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/server-jepsen-test . 
ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ENV DEBIAN_FRONTEND=noninteractive ENV CLOJURE_VERSION=1.10.3.814 diff --git a/ci/docker/sqlancer-test/Dockerfile b/ci/docker/sqlancer-test/Dockerfile index 2aa5aba9788d..3c5cea2ef7e0 100644 --- a/ci/docker/sqlancer-test/Dockerfile +++ b/ci/docker/sqlancer-test/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/sqlancer-test . +# docker build -t altinityinfra/sqlancer-test . FROM ubuntu:22.04 # ARG for quick switch to a given ubuntu mirror diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile index 926421404372..a62578f0adc3 100644 --- a/ci/docker/stateless-test/Dockerfile +++ b/ci/docker/stateless-test/Dockerfile @@ -1,9 +1,9 @@ -# docker build -t clickhouse/stateless-test . +# docker build -t altinityinfra/stateless-test . ARG FROM_TAG=latest ARG REDPANDA_VERSION=v25.1.3 FROM docker.redpanda.com/redpandadata/redpanda:${REDPANDA_VERSION} AS redpanda -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz" @@ -120,10 +120,10 @@ ENV PYTHONPATH=".:./ci" # A directory for cache RUN mkdir /dev/shm/clickhouse -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/docker/stress-test/Dockerfile b/ci/docker/stress-test/Dockerfile index 866480f27a8b..0b0a8fcba8e6 100644 --- a/ci/docker/stress-test/Dockerfile +++ b/ci/docker/stress-test/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 -# docker build -t clickhouse/stress-test . 
+# docker build -t altinityinfra/stress-test . ARG FROM_TAG=latest -FROM clickhouse/stateless-test:$FROM_TAG +FROM altinityinfra/stateless-test:$FROM_TAG RUN apt-get update -y \ && env DEBIAN_FRONTEND=noninteractive \ diff --git a/ci/docker/stress-test/README.md b/ci/docker/stress-test/README.md index fe73555fbd23..3d0fa2c9f467 100644 --- a/ci/docker/stress-test/README.md +++ b/ci/docker/stress-test/README.md @@ -6,7 +6,7 @@ Usage: ``` $ ls $HOME/someclickhouse clickhouse-client_18.14.9_all.deb clickhouse-common-static_18.14.9_amd64.deb clickhouse-server_18.14.9_all.deb -$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output clickhouse/stress-test +$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output altinityinfra/stress-test Selecting previously unselected package clickhouse-common-static. (Reading database ... 14442 files and directories currently installed.) ... diff --git a/ci/docker/style-test/Dockerfile b/ci/docker/style-test/Dockerfile index 9c83329c0db7..7bb4e502cc4b 100644 --- a/ci/docker/style-test/Dockerfile +++ b/ci/docker/style-test/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/style-test . +# docker build -t altinityinfra/style-test . FROM ubuntu:22.04 RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ diff --git a/ci/docker/test-base/Dockerfile b/ci/docker/test-base/Dockerfile index c530a602da06..5167f5e9a87b 100644 --- a/ci/docker/test-base/Dockerfile +++ b/ci/docker/test-base/Dockerfile @@ -1,4 +1,4 @@ -# docker build -t clickhouse/test-base . +# docker build -t altinityinfra/test-base . 
FROM ubuntu:22.04 # ARG for quick switch to a given ubuntu mirror @@ -105,10 +105,10 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* # Note, libmpfr6 is also a requirement for gdb -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" -COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \ +COPY --from=altinityinfra/cctools:c0aae9395f33405b6e2b \ /opt/openssl-fips/openssl.cnf \ /opt/openssl-fips/fipsmodule.cnf \ /opt/openssl-fips/fips.so \ diff --git a/ci/jobs/ast_fuzzer_job.py b/ci/jobs/ast_fuzzer_job.py index f8f778387dab..08685ff95724 100644 --- a/ci/jobs/ast_fuzzer_job.py +++ b/ci/jobs/ast_fuzzer_job.py @@ -14,7 +14,7 @@ from ci.praktika.result import Result from ci.praktika.utils import Shell, Utils -IMAGE_NAME = "clickhouse/fuzzer" +IMAGE_NAME = "altinityinfra/fuzzer" # Maximum number of reproduce commands to display inline before writing to file MAX_INLINE_REPRODUCE_COMMANDS = 20 diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 12d9f4bfd6a9..4f48fadfb4f3 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -121,8 +121,15 @@ def main(): os.makedirs(build_dir, exist_ok=True) - if info.is_local_run: + if info.is_local_run or info.is_community_pr: + print( + "NOTE: Community contribution or local run - set sccache to run without AWS credentials" + ) os.environ["SCCACHE_S3_NO_CREDENTIALS"] = "true" + # NOTE (strtgbb): sccache will throw an error if AWS credentials are present with SCCACHE_S3_NO_CREDENTIALS=1 + os.environ.pop("AWS_SECRET_ACCESS_KEY", None) + os.environ.pop("AWS_ACCESS_KEY_ID", None) + else: # Default timeout (10min), can be too low, we run this in docker # anyway, will be terminated once the build is finished @@ -131,12 +138,13 @@ def main(): os.environ["CTCACHE_S3_BUCKET"] = Settings.S3_ARTIFACT_PATH os.environ["CTCACHE_S3_FOLDER"] = 
"ccache/clang-tidy-cache" - os.environ["CH_HOSTNAME"] = ( - "https://build-cache.eu-west-1.aws.clickhouse-staging.com" - ) - os.environ["CH_USER"] = "ci_builder" - os.environ["CH_PASSWORD"] = chcache_secret.get_value() - os.environ["CH_USE_LOCAL_CACHE"] = "false" + # NOTE (strtgbb): Not used yet, but we should look into setting up the secrets for it + # os.environ["CH_HOSTNAME"] = ( + # "https://build-cache.eu-west-1.aws.clickhouse-staging.com" + # ) + # os.environ["CH_USER"] = "ci_builder" + # os.environ["CH_PASSWORD"] = chcache_secret.get_value() + # os.environ["CH_USE_LOCAL_CACHE"] = "false" if info.pr_number == 0: cmake_cmd += " -DCLICKHOUSE_OFFICIAL_BUILD=1" diff --git a/ci/jobs/clickbench.py b/ci/jobs/clickbench.py index 5d9fe87f27b2..f4d056e2fac5 100644 --- a/ci/jobs/clickbench.py +++ b/ci/jobs/clickbench.py @@ -20,7 +20,7 @@ def install(): res = ch.install_clickbench_config() if info.is_local_run: return res - return res and ch.create_log_export_config() + return res # and ch.create_log_export_config() results.append( Result.from_commands_run(name="Install ClickHouse", command=install) @@ -32,9 +32,9 @@ def install(): def start(): res = ch.start_light() - if not info.is_local_run: - if not ch.start_log_exports(check_start_time=stop_watch.start_time): - print("WARNING: Failed to start log export") + # if not info.is_local_run: + # if not ch.start_log_exports(check_start_time=stop_watch.start_time): + # print("WARNING: Failed to start log export") return res results.append( diff --git a/ci/jobs/docker_server.py b/ci/jobs/docker_server.py index 44216b549b5f..84e2a9216748 100644 --- a/ci/jobs/docker_server.py +++ b/ci/jobs/docker_server.py @@ -19,16 +19,16 @@ temp_path = Path(f"{Utils.cwd()}/ci/tmp") GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com") -with tempfile.NamedTemporaryFile("w", delete=False) as f: - GIT_KNOWN_HOSTS_FILE = f.name - GIT_PREFIX = ( # All commits to remote are done as robot-clickhouse - "git -c 
user.email=robot-clickhouse@users.noreply.github.com " - "-c user.name=robot-clickhouse -c commit.gpgsign=false " - "-c core.sshCommand=" - f"'ssh -o UserKnownHostsFile={GIT_KNOWN_HOSTS_FILE} " - "-o StrictHostKeyChecking=accept-new'" - ) - atexit.register(os.remove, f.name) +# with tempfile.NamedTemporaryFile("w", delete=False) as f: +# GIT_KNOWN_HOSTS_FILE = f.name +# GIT_PREFIX = ( # All commits to remote are done as robot-clickhouse +# "git -c user.email=robot-clickhouse@users.noreply.github.com " +# "-c user.name=robot-clickhouse -c commit.gpgsign=false " +# "-c core.sshCommand=" +# f"'ssh -o UserKnownHostsFile={GIT_KNOWN_HOSTS_FILE} " +# "-o StrictHostKeyChecking=accept-new'" +# ) +# atexit.register(os.remove, f.name) def read_build_urls(build_name: str): @@ -58,10 +58,10 @@ def docker_login(relogin: bool = True) -> None: "docker system info | grep --quiet -E 'Username|Registry'" ): Shell.check( - "docker login --username 'robotclickhouse' --password-stdin", + "docker login --username 'altinityinfra' --password-stdin", strict=True, stdin_str=Secret.Config( - "dockerhub_robot_password", type=Secret.Type.AWS_SSM_PARAMETER + "DOCKER_PASSWORD", type=Secret.Type.GH_SECRET ).get_value(), encoding="utf-8", ) @@ -166,12 +166,12 @@ def buildx_args( action_url: str, ) -> List[str]: args = [ - "--provenance=true", - "--sbom=true", + # "--provenance=true", # NOTE (strtgbb): Disable for now, incompatible with current CI + # "--sbom=true", f"--platform=linux/{arch}", f"--label=build-url={action_url}", - f"--label=com.clickhouse.build.githash={sha}", - f"--label=com.clickhouse.build.version={version}", + f"--label=com.altinity.build.githash={sha}", + f"--label=com.altinity.build.version={version}", ] if direct_urls: args.append(f"--build-arg=DIRECT_DOWNLOAD_URLS='{' '.join(direct_urls)}'") @@ -199,7 +199,7 @@ def build_and_push_image( init_args = ["docker", "buildx", "build"] if push: init_args.append("--push") - init_args.append("--output=type=image,push-by-digest=true") + 
init_args.append("--output=type=image") init_args.append(f"--tag={image.name}") else: init_args.append("--output=type=docker") @@ -332,6 +332,7 @@ def main(): version_dict = None if not info.is_local_run: version_dict = info.get_kv_data("version") + print(f"Version dict from kv data: {version_dict}") if not version_dict: version_dict = CHVersion.get_current_version_as_dict() if not info.is_local_run: @@ -341,6 +342,7 @@ def main(): info.add_workflow_report_message( "WARNING: ClickHouse version has not been found in workflow kv storage" ) + print(f"Version dict from repo: {version_dict}") assert version_dict if not info.is_local_run: @@ -348,10 +350,10 @@ def main(): if "server image" in info.job_name: image_path = args.image_path or "docker/server" - image_repo = args.image_repo or "clickhouse/clickhouse-server" + image_repo = args.image_repo or "altinityinfra/clickhouse-server" elif "keeper image" in info.job_name: image_path = args.image_path or "docker/keeper" - image_repo = args.image_repo or "clickhouse/clickhouse-keeper" + image_repo = args.image_repo or "altinityinfra/clickhouse-keeper" else: assert False, f"Unexpected job name [{info.job_name}]" @@ -370,7 +372,7 @@ def main(): push = True image = DockerImageData(image_repo, image_path) - tags = gen_tags(version_dict["string"], args.tag_type) + tags = [f'{info.pr_number}-{version_dict["string"]}'] repo_urls = {} direct_urls: Dict[str, List[str]] = {} @@ -417,19 +419,17 @@ def main(): repo_urls, os_, tag, - version_dict["describe"], + version_dict["string"], direct_urls, run_url=info.run_url, sha=info.sha, ) ) - if not push: - # The image is built locally only when we don't push it - # See `--output=type=docker` - test_docker_library(test_results) - - test_results.append(check_server_readme(image.path)) + # if not push: + # # The image is built locally only when we don't push it + # # See `--output=type=docker` + # test_docker_library(test_results) # NOTE (strtgbb): tests against the official docker library 
version of ClickHouse Result.create_from(results=test_results, stopwatch=sw).complete_job() diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index b684c1986b16..0e996147ccb2 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -172,16 +172,18 @@ def main(): os.environ["SCCACHE_LOG"] = "info" info = Info() - if info.is_local_run: - print("NOTE: It's a local run") + if info.is_local_run or info.is_community_pr: + print("NOTE: Community contribution or local run - set sccache to run without AWS credentials") os.environ["SCCACHE_S3_NO_CREDENTIALS"] = "true" else: - os.environ["CH_HOSTNAME"] = ( - "https://build-cache.eu-west-1.aws.clickhouse-staging.com" - ) - os.environ["CH_USER"] = "ci_builder" - os.environ["CH_PASSWORD"] = chcache_secret.get_value() - os.environ["CH_USE_LOCAL_CACHE"] = "false" + pass + # NOTE (strtgbb): Not used yet, but we should look into setting up the secrets for it + # os.environ["CH_HOSTNAME"] = ( + # "https://build-cache.eu-west-1.aws.clickhouse-staging.com" + # ) + # os.environ["CH_USER"] = "ci_builder" + # os.environ["CH_PASSWORD"] = chcache_secret.get_value() + # os.environ["CH_USE_LOCAL_CACHE"] = "false" Utils.add_to_PATH( f"{os.path.dirname(clickhouse_bin_path)}:{current_directory}/tests" @@ -307,7 +309,7 @@ def main(): res = CH.run_test(fast_test_command) - test_results = FTResultsProcessor(wd=Settings.OUTPUT_DIR).run() + test_results = FTResultsProcessor(wd=Settings.OUTPUT_DIR, test_options=["fast"]).run() if not res: test_results.results.append( Result.create_from( diff --git a/ci/jobs/functional_tests.py b/ci/jobs/functional_tests.py index b6df4a5aae9c..c2022c45555f 100644 --- a/ci/jobs/functional_tests.py +++ b/ci/jobs/functional_tests.py @@ -1,5 +1,6 @@ import argparse import os +import re import random import subprocess from pathlib import Path @@ -95,7 +96,7 @@ def run_tests( global_time_limit_option = ( f"--global_time_limit={global_time_limit}" if global_time_limit > 0 else "" ) - command = f"clickhouse-test 
--testname --check-zookeeper-session --hung-check --memory-limit {5*2**30} --trace \ + command = f"clickhouse-test --testname --check-zookeeper-session --hung-check --memory-limit {10*2**30} --trace \ --capture-client-stacktrace --queries ./tests/queries --test-runs {rerun_count} \ {extra_args} {global_time_limit_option} \ --queries ./tests/queries {('--order=random' if random_order else '')} -- {' '.join(tests) if tests else ''} | ts '%Y-%m-%d %H:%M:%S' \ @@ -128,6 +129,7 @@ def run_tests( "azure": " --azure-blob-storage --no-random-settings --no-random-merge-tree-settings", # azurite is slow, with randomization it can be super slow "parallel": "--no-sequential", "sequential": "--no-parallel", + "amd_tsan": " --timeout 1200", # NOTE (strtgbb): tsan is slow, increase the timeout to avoid timeout errors "flaky check": "--flaky-check", "targeted": "--flaky-check", # to disable tests not compatible with the thread fuzzer } @@ -247,12 +249,20 @@ def main(): if not info.is_local_run: # TODO: find a way to work with Azure secret so it's ok for local tests as well, for now keep azure disabled - azure_connection_string = Shell.get_output( - f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", - verbose=True, - strict=True, - ) - os.environ["AZURE_CONNECTION_STRING"] = azure_connection_string + # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( + # f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", + # verbose=True, + # ) + # NOTE(strtgbb): We pass azure credentials through the docker command, not SSM. 
+ # NOTE(strtgbb): Azure credentials don't exist in community workflow + if info.is_community_pr: + print( + "NOTE: No azure credentials provided for community PR - disable azure storage" + ) + config_installs_args += " --no-azure" + + # NOTE(strtgbb): With the above, some tests are still trying to use azure, try this: + os.environ["USE_AZURE_STORAGE_FOR_MERGE_TREE"] = "0" else: print("Disable azure for a local run") config_installs_args += " --no-azure" @@ -360,11 +370,7 @@ def main(): args.test ), "For running flaky or bugfix_validation check locally, test case name must be provided via --test" else: - if is_bugfix_validation and Labels.PR_BUGFIX not in info.pr_labels: - # Not a bugfix PR - run a simple sanity test - tests = ["00001_select_1"] - else: - tests = targeter.get_changed_tests() + tests = targeter.get_changed_tests() if tests: print(f"Test list: [{tests}]") @@ -402,12 +408,13 @@ def main(): if res and JobStages.INSTALL_CLICKHOUSE in stages: - def configure_log_export(): - if not info.is_local_run: - print("prepare log export config") - return CH.create_log_export_config() - else: - print("skip log export config for local run") + # NOTE (strtgbb): Disable log export throughout this file, it depends on aws ssm, which we don't have configured + # def configure_log_export(): + # if not info.is_local_run: + # print("prepare log export config") + # return CH.create_log_export_config() + # else: + # print("skip log export config for local run") commands = [ f"rm -rf /etc/clickhouse-client/* /etc/clickhouse-server/* /etc/clickhouse-server1/* /etc/clickhouse-server2/*", @@ -439,8 +446,8 @@ def configure_log_export(): f"prof_prefix:{temp_dir}/jemalloc_profiles/clickhouse.jemalloc" ) - if not is_coverage: - commands.append(configure_log_export) + # if not is_coverage: + # commands.append(configure_log_export) results.append( Result.from_commands_run(name="Install ClickHouse", command=commands) @@ -464,12 +471,13 @@ def start(): if not CH.start_kafka(): 
print("WARNING: Failed to start Kafka") - if not Info().is_local_run: - if not CH.start_log_exports(stop_watch.start_time): - info.add_workflow_report_message( - "WARNING: Failed to start log export" - ) - print("Failed to start log export") + # Note (strtgbb): We don't use this + # if not Info().is_local_run: + # if not CH.start_log_exports(stop_watch.start_time): + # info.add_workflow_report_message( + # "WARNING: Failed to start log export" + # ) + # print("Failed to start log export") if not CH.create_minio_log_tables(): info.add_workflow_report_message( "WARNING: Failed to create minio log tables" @@ -496,6 +504,8 @@ def start(): ) res = results[-1].is_ok() + runner_options += f" --known-fails-file-path tests/broken_tests.yaml" + test_result = None if res and JobStages.TEST in stages: stop_watch_ = Utils.Stopwatch() @@ -515,7 +525,7 @@ def start(): run_sets_cnt = rerun_count if is_targeted_check else 1 rerun_count = 1 if is_targeted_check else rerun_count - ft_res_processor = FTResultsProcessor(wd=temp_dir) + ft_res_processor = FTResultsProcessor(wd=temp_dir, test_options=test_options) # For flaky check, set a soft time limit so that the test runner stops # gracefully before the job hard timeout, allowing results to be posted. @@ -655,7 +665,7 @@ def start(): ) ) elif failed_tests: - ft_res_processor = FTResultsProcessor(wd=temp_dir) + ft_res_processor = FTResultsProcessor(wd=temp_dir, test_options=test_options) run_tests( batch_num=0, batch_total=0, @@ -675,15 +685,14 @@ def start(): if success_after_rerun or failed_after_rerun: for test_case in test_result.results: if test_case.name in success_after_rerun: - if is_llvm_coverage: - print( - f"Test {test_case.name} has succeeded after rerun. 
Mark it as OK" - ) - test_case.remove_label(Result.Status.FAILED) - test_case.remove_label(Result.StatusExtended.FAIL) - test_case.set_status(Result.StatusExtended.OK) - else: - test_case.set_label(Result.Label.OK_ON_RETRY) + # NOTE (strtgbb): Tweaked to always mark a test that is ok on retry as ok. We want to ignore flaky tests. + print( + f"Test {test_case.name} has succeeded after rerun. Mark it as OK" + ) + test_case.remove_label(Result.Status.FAILED) + test_case.remove_label(Result.StatusExtended.FAIL) + test_case.set_status(Result.StatusExtended.OK) + test_case.set_label(Result.Label.OK_ON_RETRY) elif test_case.name in failed_after_rerun: test_case.set_label(Result.Label.FAILED_ON_RETRY) results.append(retry_result) @@ -709,6 +718,7 @@ def start(): src=CH, dest=cidb_cluster, job_name=info.job_name, + branch=info.git_branch, ).do(), ) ) @@ -791,6 +801,10 @@ def collect_logs(): if test_result: test_result.sort() + broken_tests_handler_log = os.path.join(temp_dir, "broken_tests_handler.log") + if os.path.exists(broken_tests_handler_log): + debug_files.append(broken_tests_handler_log) + R = Result.create_from( results=results, stopwatch=stop_watch, diff --git a/ci/jobs/fuzzers_job.py b/ci/jobs/fuzzers_job.py index 666178c0da60..e699063fbf31 100644 --- a/ci/jobs/fuzzers_job.py +++ b/ci/jobs/fuzzers_job.py @@ -33,8 +33,9 @@ def start(): # TODO: attach gdb # and ch.attach_gdb() - if ch.create_log_export_config(): - ch.start_log_exports(check_start_time=stop_watch.start_time) + # Note (strtgbb): We don't use this + # if ch.create_log_export_config(): + # ch.start_log_exports(check_start_time=stop_watch.start_time) if res: print("AST Fuzzer") diff --git a/ci/jobs/install_check.py b/ci/jobs/install_check.py index d5d661ef8a03..1bfe0d2bc138 100644 --- a/ci/jobs/install_check.py +++ b/ci/jobs/install_check.py @@ -6,8 +6,8 @@ from ci.praktika.result import Result from ci.praktika.utils import Shell, Utils -RPM_IMAGE = "clickhouse/install-rpm-test" -DEB_IMAGE = 
"clickhouse/install-deb-test" +RPM_IMAGE = "altinityinfra/install-rpm-test" +DEB_IMAGE = "altinityinfra/install-deb-test" REPO_PATH = Utils.cwd() TEMP_PATH = Path(f"{REPO_PATH}/ci/tmp/") @@ -23,7 +23,10 @@ def prepare_test_scripts(): # listening manually here. systemctl restart clickhouse-server clickhouse-client -q 'SELECT version()' -grep "$test_env" /proc/$(cat /var/run/clickhouse-server/clickhouse-server.pid)/environ""" +grep "$test_env" /proc/$(cat /var/run/clickhouse-server/clickhouse-server.pid)/environ +echo "Check Stacktrace" +output=$(clickhouse-local --stacktrace --query="SELECT throwIf(1,'throw')" 2>&1 >/dev/null || true) +echo "$output" | grep 'FunctionThrowIf::executeImpl'""" initd_via_systemd_test = r"""#!/bin/bash set -e trap "bash -ex /packages/preserve_logs.sh" ERR diff --git a/ci/jobs/integration_test_job.py b/ci/jobs/integration_test_job.py index 009513e62c7d..89d042200fe3 100644 --- a/ci/jobs/integration_test_job.py +++ b/ci/jobs/integration_test_job.py @@ -5,6 +5,9 @@ from pathlib import Path from typing import List, Tuple +import yaml # NOTE (strtgbb): Used for loading broken tests rules +import re + from more_itertools import tail from ci.jobs.scripts.find_tests import Targeting @@ -82,6 +85,106 @@ def _mark_infrastructure_errors(results: list) -> int: return count +def get_broken_tests_rules(broken_tests_file_path: str) -> dict: + if ( + not os.path.isfile(broken_tests_file_path) + or os.path.getsize(broken_tests_file_path) == 0 + ): + raise ValueError( + "There is something wrong with getting broken tests rules: " + f"file '{broken_tests_file_path}' is empty or does not exist." 
+ ) + + with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file: + broken_tests = yaml.safe_load(broken_tests_file) + + compiled_rules = {"exact": {}, "pattern": {}} + + for test in broken_tests: + regex = test.get("regex") is True + rule = { + "reason": test["reason"], + } + + if test.get("message"): + rule["message"] = re.compile(test["message"]) if regex else test["message"] + + if test.get("not_message"): + rule["not_message"] = ( + re.compile(test["not_message"]) if regex else test["not_message"] + ) + if test.get("check_types"): + rule["check_types"] = test["check_types"] + + if regex: + rule["regex"] = True + compiled_rules["pattern"][re.compile(test["name"])] = rule + else: + compiled_rules["exact"][test["name"]] = rule + + return compiled_rules + + +def test_is_known_fail(broken_tests_rules, test_name, test_logs, job_flags): + matching_rules = [] + + def matches_substring(substring, log, is_regex): + if log is None: + return False + if is_regex: + return bool(substring.search(log)) + return substring in log + + broken_tests_log = f"{temp_path}/broken_tests_handler.log" + + with open(broken_tests_log, "a") as log_file: + + log_file.write(f"Checking known broken tests for failed test: {test_name}\n") + log_file.write("Potential matching rules:\n") + exact_rule = broken_tests_rules["exact"].get(test_name) + if exact_rule: + log_file.write(f"{test_name} - {exact_rule}\n") + matching_rules.append(exact_rule) + + for name_re, data in broken_tests_rules["pattern"].items(): + if name_re.fullmatch(test_name): + log_file.write(f"{name_re} - {data}\n") + matching_rules.append(data) + + if not matching_rules: + return False + + log_file.write(f"First line of test logs: {test_logs.splitlines()[0]}\n") + + for rule_data in matching_rules: + if rule_data.get("check_types") and not any( + ct in flag for ct in rule_data["check_types"] for flag in job_flags + ): + log_file.write( + f"Skip rule: Check types didn't match: '{rule_data['check_types']}' not 
in '{job_flags}'\n" + ) + continue # check_types didn't match → skip rule + + is_regex = rule_data.get("regex", False) + not_message = rule_data.get("not_message") + if not_message and matches_substring(not_message, test_logs, is_regex): + log_file.write( + f"Skip rule: Not message matched: '{rule_data['not_message']}'\n" + ) + continue # not_message matched → skip rule + message = rule_data.get("message") + if message and not matches_substring(message, test_logs, is_regex): + log_file.write( + f"Skip rule: Message didn't match: '{rule_data['message']}'\n" + ) + continue + + log_file.write(f"Matched rule: {rule_data}\n") + return rule_data["reason"] + + return False + + def _start_docker_in_docker(): with open("./ci/tmp/docker-in-docker.log", "w") as log_file: dockerd_proc = subprocess.Popen( @@ -653,8 +756,8 @@ def main(): session_timeout_sequential = 3600 if is_llvm_coverage: - session_timeout_parallel = 7200 - session_timeout_sequential = 7200 + session_timeout_parallel = 3600 * 3 + session_timeout_sequential = 3600 * 3 if args.session_timeout: session_timeout_parallel = args.session_timeout * 2 @@ -774,10 +877,10 @@ def main(): is_flaky_check or is_bugfix_validation or is_targeted_check or info.is_local_run ): test_result_retries = run_pytest_and_collect_results( - command=f"{' '.join(failed_test_cases)} --report-log-exclude-logs-on-passed-tests --tb=short -n 1 --dist=loadfile --session-timeout=1200", + command=f"{' '.join(failed_test_cases)} --report-log-exclude-logs-on-passed-tests --tb=short -n 1 --dist=loadfile --session-timeout=3600", env=test_env, report_name="retries", - timeout=1200 + 600, + timeout=3600 + 600, ) successful_retries = [t.name for t in test_result_retries.results if t.is_ok()] failed_retries = [t.name for t in test_result_retries.results if t.is_failure()] @@ -808,6 +911,28 @@ def main(): ) attached_files.append("./ci/tmp/dmesg.log") + broken_tests_rules = get_broken_tests_rules("tests/broken_tests.yaml") + for result in test_results: + 
if result.status == Result.StatusExtended.FAIL: + try: + known_fail_reason = test_is_known_fail( + broken_tests_rules, + result.name, + result.info, + job_params, + ) + except Exception as e: + print(f"Error getting known fail reason for result {result.name}: {e}") + continue + else: + if not known_fail_reason: + continue + result.status = Result.StatusExtended.BROKEN + result.info += f"\nMarked as broken: {known_fail_reason}" + + if os.path.exists(f"{temp_path}/broken_tests_handler.log"): + attached_files.append(f"{temp_path}/broken_tests_handler.log") + # For targeted checks, session-timeout is an expected risk (because of --count N # overloading), so do not propagate the synthetic "Timeout" result as a failure. if is_targeted_check: diff --git a/ci/jobs/llvm_coverage_job.py b/ci/jobs/llvm_coverage_job.py index 6af4d0605176..1ceb928aa40f 100644 --- a/ci/jobs/llvm_coverage_job.py +++ b/ci/jobs/llvm_coverage_job.py @@ -95,12 +95,12 @@ def get_git_info() -> tuple[str, list[str], str, str, str, int]: master_track_commits: list[str] = info.get_kv_data("master_track_commits_sha") or [] if not master_track_commits: merge_base = Shell.get_output( - f"gh api repos/ClickHouse/ClickHouse/compare/master...{current_commit_sha} -q .merge_base_commit.sha", + f"gh api repos/Altinity/ClickHouse/compare/master...{current_commit_sha} -q .merge_base_commit.sha", verbose=True, ).strip() if merge_base: raw = Shell.get_output( - f"gh api 'repos/ClickHouse/ClickHouse/commits?sha={merge_base}&per_page=30' -q '.[].sha'", + f"gh api 'repos/Altinity/ClickHouse/commits?sha={merge_base}&per_page=30' -q '.[].sha'", verbose=True, ) master_track_commits = raw.splitlines() diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py index 18be36802fa9..0c119f771a12 100644 --- a/ci/jobs/scripts/clickhouse_proc.py +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -792,17 +792,17 @@ def _reader(): return process.returncode == 0 def terminate(self, force=False): - if 
self.minio_proc: - # remove the webhook so it doesn't spam with errors once we stop ClickHouse - Shell.check( - "/mc admin config reset clickminio logger_webhook:ch_server_webhook", - verbose=True, - ) - Shell.check( - "/mc admin config reset clickminio audit_webhook:ch_audit_webhook", - verbose=True, - ) - + # NOTE (strtgbb): Log tables are disabled, we don't use them + # if self.minio_proc: + # # remove the webhook so it doesn't spam with errors once we stop ClickHouse + # Shell.check( + # "/mc admin config reset clickminio logger_webhook:ch_server_webhook", + # verbose=True, + # ) + # Shell.check( + # "/mc admin config reset clickminio audit_webhook:ch_audit_webhook", + # verbose=True, + # ) if self.kafka_proc: print("Stopping Redpanda broker") Shell.check("pkill -f redpanda", verbose=True) @@ -1102,12 +1102,12 @@ def dump_system_tables(self): "trace_log", "transactions_info_log", "metric_log", - "blob_storage_log", + # "blob_storage_log", # NOTE (strtgbb): contains Azure container name, currently considered secret by s3 upload secret scanner. 
"error_log", "query_metric_log", "part_log", - "minio_audit_logs", - "minio_server_logs", + # "minio_audit_logs", # NOTE (strtgbb): we do not use these logs + # "minio_server_logs", ] ROWS_COUNT_IN_SYSTEM_TABLE_LIMIT = 10_000_000 @@ -1328,6 +1328,7 @@ def set_random_timezone(): res = False try: if command == "logs_export_config": + exit(0) # Note (strtgbb): We don't use log exports if not Info().is_local_run: # Disable log export for local runs - ideally this command wouldn't be triggered, # but conditional disabling is complex in legacy bash scripts (run_fuzzer.sh, stress_runner.sh) @@ -1335,6 +1336,7 @@ def set_random_timezone(): else: res = True elif command == "logs_export_start": + exit(0) # Note (strtgbb): We don't use log exports # FIXME: the start_time must be preserved globally in ENV or something like that # to get the same values in different DBs # As a wild idea, it could be stored in a Info.check_start_timestamp @@ -1345,6 +1347,7 @@ def set_random_timezone(): else: res = True elif command == "logs_export_stop": + exit(0) # Note (strtgbb): We don't use log exports if not Info().is_local_run: # Disable log export for local runs - ideally this command wouldn't be triggered, # but conditional disabling is complex in legacy bash scripts (run_fuzzer.sh, stress_runner.sh) diff --git a/ci/jobs/scripts/clickhouse_version.py b/ci/jobs/scripts/clickhouse_version.py index 205858193306..1e65113f75af 100644 --- a/ci/jobs/scripts/clickhouse_version.py +++ b/ci/jobs/scripts/clickhouse_version.py @@ -1,9 +1,16 @@ import re +import sys +import os from pathlib import Path from ci.praktika.info import Info from ci.praktika.utils import Shell +# NOTE(vnemkov): extremely hackish, buts allows to reuse code from version_helper and git_helper with our modifications. 
+ +# allow to import other packages that are located in `tests/ci` directory, like `git_helper` +import tests.ci +sys.path.append(os.path.abspath(tests.ci.__path__._path[0])) class CHVersion: FILE_WITH_VERSION_PATH = "./cmake/autogenerated_versions.txt" @@ -15,6 +22,8 @@ class CHVersion: SET(VERSION_MINOR {minor}) SET(VERSION_PATCH {patch}) SET(VERSION_GITHASH {githash}) +SET(VERSION_TWEAK {tweak}) +SET(VERSION_FLAVOUR {flavour}) SET(VERSION_DESCRIBE {describe}) SET(VERSION_STRING {string}) """ @@ -41,6 +50,8 @@ def get_release_version_as_dict(cls): "patch": versions["patch"], "revision": versions["revision"], "githash": versions["githash"], + "tweak": versions["tweak"], + "flavour": versions["flavour"], "describe": versions["describe"], "string": versions["string"], } @@ -49,6 +60,10 @@ def get_release_version_as_dict(cls): @classmethod def get_current_version_as_dict(cls): version = cls.get_release_version_as_dict() + + # NOTE (strtgbb): Just return, no need for the below logic + return version + info = Info() try: # Check if the commit is directly on the first-parent chain diff --git a/ci/jobs/scripts/find_tests.py b/ci/jobs/scripts/find_tests.py index 87583d321ced..bc7852f6b13c 100644 --- a/ci/jobs/scripts/find_tests.py +++ b/ci/jobs/scripts/find_tests.py @@ -7,8 +7,8 @@ sys.path.append("./") +from ci.jobs.scripts.cidb_cluster import CIDBCluster from ci.jobs.scripts.find_symbols import DiffToSymbols -from ci.praktika.cidb import CIDB from ci.praktika.info import Info from ci.praktika.result import Result from ci.praktika.settings import Settings @@ -51,8 +51,15 @@ class Targeting: INTEGRATION_JOB_TYPE = "Integration" STATELESS_JOB_TYPE = "Stateless" - def __init__(self, info: Info): + def __init__(self, info: Info, branch: str = ""): self.info = info + self.branch = branch or getattr(info, 'base_branch', '') + # NOTE (strtgbb): Read credentials from env directly to avoid + # a mutation bug in CIDBCluster's secret resolution path. 
+ url = os.environ.get(Settings.SECRET_CI_DB_URL, "") + user = os.environ.get(Settings.SECRET_CI_DB_USER, "") + pwd = os.environ.get(Settings.SECRET_CI_DB_PASSWORD, "") + self.cidb = CIDBCluster(url=url, user=user, pwd=pwd) if "stateless" in info.job_name.lower(): self.job_type = self.STATELESS_JOB_TYPE elif "integration" in info.job_name.lower(): @@ -94,16 +101,12 @@ def get_changed_tests(self): return sorted(result) def get_previously_failed_tests(self): - from ci.praktika.cidb import CIDB - from ci.praktika.settings import Settings - assert self.job_type, "Unsupported job type" assert ( self.info.pr_number > 0 ), "Find tests by previous failures applicable only for PRs" tests = [] - cidb = CIDB(url=Settings.CI_DB_READ_URL, user="play", passwd="") if self.job_type == self.INTEGRATION_JOB_TYPE: test_name_pattern = "^test_" elif self.job_type == self.STATELESS_JOB_TYPE: @@ -115,7 +118,11 @@ def get_previously_failed_tests(self): JOB_TYPE=self.job_type, TEST_NAME_PATTERN=test_name_pattern, ) - query_result = cidb.query(query, log_level="") + query_result = self.cidb.do_select_query( + query, db_name=Settings.CI_DB_DB_NAME, timeout=20 + ) + if not query_result: + return [] # Parse test names from the query result for line in query_result.strip().split("\n"): if line.strip(): @@ -134,19 +141,20 @@ def get_tests_by_changed_symbols(self, symbols): """ SYMBOL_TO_TESTS_QUERY = """ SELECT groupArray(test_name) as tests - from checks_coverage_inverted + from checks_coverage_inverted FINAL where 1 - and check_start_time > now() - interval 3 days + and branch = '{BRANCH}' and check_name LIKE '{JOB_TYPE}%' and symbol = '{SYMBOL}' """ symbol_to_tests = {} - cidb = CIDB(url=Settings.CI_DB_READ_URL, user="play", passwd="") for symbol in symbols: - query = SYMBOL_TO_TESTS_QUERY.format(JOB_TYPE=self.job_type, SYMBOL=symbol) - result = cidb.query(query, log_level="") + query = SYMBOL_TO_TESTS_QUERY.format(BRANCH=self.branch, JOB_TYPE=self.job_type, SYMBOL=symbol) + result = 
self.cidb.do_select_query( + query, db_name=Settings.CI_DB_DB_NAME, timeout=20 + ) # Parse the ClickHouse Array result - if result.strip(): + if result and result.strip(): try: tests = ast.literal_eval(result.strip()) symbol_to_tests[symbol] = tests if isinstance(tests, list) else [] diff --git a/ci/jobs/scripts/functional_tests/export_coverage.py b/ci/jobs/scripts/functional_tests/export_coverage.py index 4b8a06ab4ca9..dcef84a76218 100644 --- a/ci/jobs/scripts/functional_tests/export_coverage.py +++ b/ci/jobs/scripts/functional_tests/export_coverage.py @@ -1,5 +1,6 @@ from ci.jobs.scripts.cidb_cluster import CIDBCluster from ci.jobs.scripts.clickhouse_proc import ClickHouseProc +from ci.praktika.settings import Settings from ci.praktika.utils import Shell, Utils temp_dir = f"{Utils.cwd()}/ci/tmp" @@ -13,6 +14,7 @@ def __init__( src: ClickHouseProc, dest: CIDBCluster, job_name: str, + branch: str, check_start_time="", to_file=False, ): @@ -20,6 +22,7 @@ def __init__( self.dest = dest assert to_file or self.dest.is_ready(), "Destination cluster is not ready" self.job_name = job_name + self.branch = branch self.check_start_time = check_start_time or Utils.timestamp_to_str( Utils.timestamp() ) @@ -59,8 +62,10 @@ def do(self): if not self.to_file: query = ( - f"INSERT INTO FUNCTION remoteSecure('{self.dest.url.removeprefix('https://')}', 'default.checks_coverage_inverted', '{self.dest.user}', '{self.dest.pwd}') " + f"INSERT INTO FUNCTION remoteSecure('{self.dest.url.removeprefix('https://').split(':')[0]}', '{Settings.CI_DB_DB_NAME}.checks_coverage_inverted', '{self.dest.user}', '{self.dest.pwd}') " + "(branch, symbol, check_start_time, check_name, test_name) " "SELECT DISTINCT " + f"'{self.branch}' AS branch, " "arrayJoin(symbol) AS symbol, " f"'{self.check_start_time}' AS check_start_time, " f"'{self.job_name}' AS check_name, " diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py index 0c60e7950ebc..87c3fdeae358 
100755 --- a/ci/jobs/scripts/functional_tests_results.py +++ b/ci/jobs/scripts/functional_tests_results.py @@ -10,6 +10,7 @@ TIMEOUT_SIGN = "[ Timeout! " UNKNOWN_SIGN = "[ UNKNOWN " SKIPPED_SIGN = "[ SKIPPED " +BROKEN_SIGN = "[ BROKEN " HUNG_SIGN = "Found hung queries in processlist" SERVER_DIED_SIGN = "Server died, terminating all processes" SERVER_DIED_SIGN2 = "Server does not respond to health check" @@ -24,10 +25,9 @@ # This ensures we only match actual test result lines, not patterns embedded in error messages # Note: Test names can contain letters, digits, underscores, hyphens, and dots TEST_RESULT_PATTERN = re.compile( - r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} ([\w\-\.]+):\s+(\[ (?:OK|FAIL|SKIPPED|UNKNOWN|Timeout!) \])\s+([\d.]+) sec\." + r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} ([\w\-\.]+):\s+(\[ (?:OK|FAIL|SKIPPED|BROKEN|UNKNOWN|Timeout!) \])\s+([\d.]+) sec\." ) - class FTResultsProcessor: @dataclasses.dataclass class Summary: @@ -36,6 +36,7 @@ class Summary: unknown: int failed: int success: int + broken: int test_results: List[Result] hung: bool = False server_died: bool = False @@ -43,9 +44,10 @@ class Summary: success_finish: bool = False test_end: bool = True - def __init__(self, wd): + def __init__(self, wd, test_options): self.tests_output_file = f"{wd}/test_result.txt" self.debug_files = [] + self.test_options = test_options def _process_test_output(self): total = 0 @@ -53,6 +55,7 @@ def _process_test_output(self): unknown = 0 failed = 0 success = 0 + broken = 0 hung = False server_died = False retries = False @@ -106,13 +109,16 @@ def _process_test_output(self): elif SKIPPED_SIGN in status_marker: skipped += 1 test_results.append((test_name, "SKIPPED", test_time, [])) + elif BROKEN_SIGN in line: + broken += 1 + test_results.append((test_name, "BROKEN", test_time, [])) else: success += int(OK_SIGN in status_marker) test_results.append((test_name, "OK", test_time, [])) test_end = False elif ( len(test_results) > 0 - and test_results[-1][1] in ("FAIL", 
"SKIPPED") + and test_results[-1][1] in ("FAIL", "SKIPPED", "BROKEN") and not test_end ): test_results[-1][3].append(original_line) @@ -135,6 +141,7 @@ def _process_test_output(self): info="".join(test[3])[:16384], ) ) + except Exception as e: print(f"ERROR: Failed to parse test results: {test}") traceback.print_exc() @@ -160,6 +167,7 @@ def _process_test_output(self): unknown=unknown, failed=failed, success=success, + broken=broken, test_results=test_results, hung=hung, server_died=server_died, @@ -209,7 +217,7 @@ def run(self, task_name="Tests"): pass if not info: - info = f"Failed: {s.failed}, Passed: {s.success}, Skipped: {s.skipped}" + info = f"Failed: {s.failed}, Passed: {s.success}, Skipped: {s.skipped}, Broken: {s.broken}" result = Result.create_from( name=task_name, diff --git a/ci/jobs/scripts/fuzzer/run-fuzzer.sh b/ci/jobs/scripts/fuzzer/run-fuzzer.sh index e4655a42f545..73d85dd2d0bc 100755 --- a/ci/jobs/scripts/fuzzer/run-fuzzer.sh +++ b/ci/jobs/scripts/fuzzer/run-fuzzer.sh @@ -58,8 +58,8 @@ EOL $PWD EOL - - (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_config) || echo "Failed to create log export config" + # NOTE (strtgbb): Log tables are disabled, we don't use them + # (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_config) || echo "Failed to create log export config" } function filter_exists_and_template @@ -185,7 +185,8 @@ function fuzz echo 'Server started and responded.' 
- (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_start) || echo "Failed to start log exports" + # NOTE (strtgbb): Log tables are disabled, we don't use them + # (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_start) || echo "Failed to start log exports" # Setup arguments for the fuzzer FUZZER_OUTPUT_SQL_FILE='' diff --git a/ci/jobs/scripts/integration_tests_configs.py b/ci/jobs/scripts/integration_tests_configs.py index 45913d0868c6..51cd568006b1 100644 --- a/ci/jobs/scripts/integration_tests_configs.py +++ b/ci/jobs/scripts/integration_tests_configs.py @@ -45,24 +45,24 @@ class TC: ] IMAGES_ENV = { - "clickhouse/dotnet-client": "DOCKER_DOTNET_CLIENT_TAG", - "clickhouse/integration-helper": "DOCKER_HELPER_TAG", - "clickhouse/integration-test": "DOCKER_BASE_TAG", - "clickhouse/kerberos-kdc": "DOCKER_KERBEROS_KDC_TAG", - "clickhouse/test-mysql80": "DOCKER_TEST_MYSQL80_TAG", - "clickhouse/test-mysql57": "DOCKER_TEST_MYSQL57_TAG", - "clickhouse/mysql-golang-client": "DOCKER_MYSQL_GOLANG_CLIENT_TAG", - "clickhouse/mysql-java-client": "DOCKER_MYSQL_JAVA_CLIENT_TAG", - "clickhouse/mysql-js-client": "DOCKER_MYSQL_JS_CLIENT_TAG", - "clickhouse/arrowflight-server-test": "DOCKER_ARROWFLIGHT_SERVER_TAG", - "clickhouse/mysql-php-client": "DOCKER_MYSQL_PHP_CLIENT_TAG", - "clickhouse/nginx-dav": "DOCKER_NGINX_DAV_TAG", - "clickhouse/postgresql-java-client": "DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", - "clickhouse/python-bottle": "DOCKER_PYTHON_BOTTLE_TAG", - "clickhouse/integration-test-with-unity-catalog": "DOCKER_BASE_WITH_UNITY_CATALOG_TAG", - "clickhouse/integration-test-with-hms": "DOCKER_BASE_WITH_HMS_TAG", - "clickhouse/mysql_dotnet_client": "DOCKER_MYSQL_DOTNET_CLIENT_TAG", - "clickhouse/s3-proxy": "DOCKER_S3_PROXY_TAG", + "altinityinfra/dotnet-client": "DOCKER_DOTNET_CLIENT_TAG", + "altinityinfra/integration-helper": "DOCKER_HELPER_TAG", + "altinityinfra/integration-test": "DOCKER_BASE_TAG", + 
"altinityinfra/kerberos-kdc": "DOCKER_KERBEROS_KDC_TAG", + "altinityinfra/test-mysql80": "DOCKER_TEST_MYSQL80_TAG", + "altinityinfra/test-mysql57": "DOCKER_TEST_MYSQL57_TAG", + "altinityinfra/mysql-golang-client": "DOCKER_MYSQL_GOLANG_CLIENT_TAG", + "altinityinfra/mysql-java-client": "DOCKER_MYSQL_JAVA_CLIENT_TAG", + "altinityinfra/mysql-js-client": "DOCKER_MYSQL_JS_CLIENT_TAG", + "altinityinfra/arrowflight-server-test": "DOCKER_ARROWFLIGHT_SERVER_TAG", + "altinityinfra/mysql-php-client": "DOCKER_MYSQL_PHP_CLIENT_TAG", + "altinityinfra/nginx-dav": "DOCKER_NGINX_DAV_TAG", + "altinityinfra/postgresql-java-client": "DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", + "altinityinfra/python-bottle": "DOCKER_PYTHON_BOTTLE_TAG", + "altinityinfra/integration-test-with-unity-catalog": "DOCKER_BASE_WITH_UNITY_CATALOG_TAG", + "altinityinfra/integration-test-with-hms": "DOCKER_BASE_WITH_HMS_TAG", + "altinityinfra/mysql_dotnet_client": "DOCKER_MYSQL_DOTNET_CLIENT_TAG", + "altinityinfra/s3-proxy": "DOCKER_S3_PROXY_TAG", } @@ -249,6 +249,11 @@ def get_tests_execution_time(info: Info, job_options: str) -> dict[str, int]: assert info.updated_at start_time_filter = f"parseDateTimeBestEffort('{info.updated_at}')" + if info.pr_number == 0: + branch_filter = f"head_ref = '{info.git_branch}'" + else: + branch_filter = f"head_ref = '{info.base_branch}'" + build = job_options.split(",", 1)[0] query = f""" @@ -260,12 +265,12 @@ def get_tests_execution_time(info: Info, job_options: str) -> dict[str, int]: SELECT splitByString('::', test_name)[1] AS file, median(test_duration_ms) AS test_duration_ms - FROM checks + FROM `gh-data`.checks WHERE (check_name LIKE 'Integration tests%') AND (check_name LIKE '%{build}%') AND (check_start_time >= ({start_time_filter} - toIntervalDay(20))) AND (check_start_time <= ({start_time_filter} - toIntervalHour(5))) - AND ((head_ref = 'master') AND startsWith(head_repo, 'ClickHouse/')) + AND ({branch_filter}) AND (file != '') AND (test_status != 'SKIPPED') AND (test_status 
!= 'FAIL') diff --git a/ci/jobs/scripts/workflow_hooks/filter_job.py b/ci/jobs/scripts/workflow_hooks/filter_job.py index 53e1d9d04163..964245dba366 100644 --- a/ci/jobs/scripts/workflow_hooks/filter_job.py +++ b/ci/jobs/scripts/workflow_hooks/filter_job.py @@ -32,10 +32,10 @@ def only_docs(changed_files): ] PRELIMINARY_JOBS = [ - JobNames.STYLE_CHECK, + # JobNames.STYLE_CHECK, JobNames.FAST_TEST, - "Build (amd_tidy)", - "Build (arm_tidy)", + # "Build (amd_tidy)", + # "Build (arm_tidy)", ] INTEGRATION_TEST_FLAKY_CHECK_JOBS = [ @@ -223,23 +223,29 @@ def should_skip_job(job_name): ): return True, "Skipped, not labeled with 'pr-performance'" + ci_exclude_tags = _info_cache.get_kv_data("ci_exclude_tags") or [] + for tag in ci_exclude_tags: + if tag in job_name.lower(): + return True, f"Skipped, job name includes excluded tag '{tag}'" + + # NOTE (strtgbb): disabled this feature for now # If only the functional tests script changed, run only the first batch of stateless tests - if changed_files and all( - f.startswith("ci/") and f.endswith(".py") for f in changed_files - ): - if JobNames.STATELESS in job_name: - match = re.search(r"(\d)/\d", job_name) - if match and match.group(1) != "1" or "sequential" in job_name: - return True, "Skipped, only job script changed - run first batch only" - - if JobNames.INTEGRATION in job_name: - match = re.search(r"(\d)/\d", job_name) - if ( - match - and match.group(1) != "1" - or "sequential" in job_name - or "_asan" not in job_name - ): - return True, "Skipped, only job script changed - run first batch only" + # if changed_files and all( + # f.startswith("ci/") and f.endswith(".py") for f in changed_files + # ): + # if JobNames.STATELESS in job_name: + # match = re.search(r"(\d)/\d", job_name) + # if match and match.group(1) != "1" or "sequential" in job_name: + # return True, "Skipped, only job script changed - run first batch only" + + # if JobNames.INTEGRATION in job_name: + # match = re.search(r"(\d)/\d", job_name) + # if ( + # 
match + # and match.group(1) != "1" + # or "sequential" in job_name + # or "_asan" not in job_name + # ): + # return True, "Skipped, only job script changed - run first batch only" return False, "" diff --git a/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py b/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py new file mode 100644 index 000000000000..c28f59b552ee --- /dev/null +++ b/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py @@ -0,0 +1,18 @@ +import re + +from ci.praktika.info import Info + + +def get_ci_tags(pr_body, tag_prefix): + pattern = rf"(- \[x\] + - true - true - https://crash.clickhouse.com/ + false + false + diff --git a/programs/server/config.yaml.example b/programs/server/config.yaml.example index 54632af26ccf..a350f487fb19 100644 --- a/programs/server/config.yaml.example +++ b/programs/server/config.yaml.example @@ -928,8 +928,8 @@ query_masking_rules: # response_content: config://http_server_default_response send_crash_reports: - enabled: true - endpoint: 'https://crash.clickhouse.com/' + enabled: false + endpoint: '' # Uncomment to disable ClickHouse internal DNS caching. # disable_internal_dns_cache: 1 diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 2bf59c15f187..d371512145e6 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -3,7 +3,7 @@ ClickHouse Dashboard - +