From 62111bb2dcac37fd21d06ce3155dbd0492ea65ab Mon Sep 17 00:00:00 2001
From: Vasily Nemkov
Date: Tue, 21 Oct 2025 11:25:25 +0200
Subject: [PATCH 1/4] Merge pull request #1095 from
 Altinity/ports/25.8/959_iceberg_addtional_columns_in_system_tables

Antalya 25.8: Port of #959 and #1026, Iceberg additional columns in
system tables.
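
With these changes, system.tables reports the partition and sorting keys
of data lake tables through the new IDataLakeMetadata::partitionKey() /
sortingKey() hooks instead of leaving the columns empty. An illustrative
query (the table name here is hypothetical, and the key values depend on
the table's Iceberg partition spec and sort order):

    SELECT name, engine, partition_key, sorting_key
    FROM system.tables
    WHERE name = 't_ice'
    FORMAT Vertical

    -- e.g.:
    -- name:          t_ice
    -- engine:        IcebergS3
    -- partition_key: bucket(16, id), day(ts)
    -- sorting_key:   id desc, hour(ts) asc
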
---
 .../DataLakes/IDataLakeMetadata.h             |  3 ++
 .../DataLakes/Iceberg/IcebergMetadata.cpp     | 13 +++++
 .../DataLakes/Iceberg/IcebergMetadata.h       |  3 ++
 .../DataLakes/Iceberg/Snapshot.h              |  2 +
 src/Storages/System/StorageSystemTables.cpp   | 54 ++++++++++++++++---
 5 files changed, 67 insertions(+), 8 deletions(-)

diff --git a/src/Storages/ObjectStorage/DataLakes/IDataLakeMetadata.h b/src/Storages/ObjectStorage/DataLakes/IDataLakeMetadata.h
index a7a9135a35c4..0cc361683d2e 100644
--- a/src/Storages/ObjectStorage/DataLakes/IDataLakeMetadata.h
+++ b/src/Storages/ObjectStorage/DataLakes/IDataLakeMetadata.h
@@ -137,6 +137,9 @@ class IDataLakeMetadata : boost::noncopyable
     virtual void alter(const AlterCommands & /*params*/, ContextPtr /*context*/) { throwNotImplemented("alter"); }
     virtual void drop(ContextPtr) { }
 
+    virtual std::optional<String> partitionKey(ContextPtr) const { return {}; }
+    virtual std::optional<String> sortingKey(ContextPtr) const { return {}; }
+
 protected:
     virtual ObjectIterator createKeysIterator(Strings && data_files_, ObjectStoragePtr object_storage_, IDataLakeMetadata::FileProgressCallback callback_) const;
 
diff --git a/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp
index 3ce0a2ee4838..6c045dd5fe9e 100644
--- a/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp
+++ b/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp
@@ -859,6 +859,19 @@ std::optional<UInt64> IcebergMetadata::totalBytes(ContextPtr local_context) cons
     return result;
 }
 
+std::optional<String> IcebergMetadata::partitionKey(ContextPtr) const
+{
+    SharedLockGuard lock(mutex);
+    return relevant_snapshot->partition_key;
+}
+
+std::optional<String> IcebergMetadata::sortingKey(ContextPtr) const
+{
+    SharedLockGuard lock(mutex);
+    return relevant_snapshot->sorting_key;
+}
+
+
 ObjectIterator IcebergMetadata::iterate(
     const ActionsDAG * filter_dag,
     FileProgressCallback callback,
diff --git a/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.h b/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.h
index 200b5eae817a..a67e1262bf49 100644
--- a/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.h
+++ b/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.h
@@ -129,6 +129,9 @@ class IcebergMetadata : public IDataLakeMetadata
     void drop(ContextPtr context) override;
 
+    std::optional<String> partitionKey(ContextPtr) const override;
+    std::optional<String> sortingKey(ContextPtr) const override;
+
 private:
     Iceberg::PersistentTableComponents initializePersistentTableComponents(
         StorageObjectStorageConfigurationPtr configuration, IcebergMetadataFilesCachePtr cache_ptr, ContextPtr context_);
diff --git a/src/Storages/ObjectStorage/DataLakes/Iceberg/Snapshot.h b/src/Storages/ObjectStorage/DataLakes/Iceberg/Snapshot.h
index 28b1b26a1a8e..47fc360ad13d 100644
--- a/src/Storages/ObjectStorage/DataLakes/Iceberg/Snapshot.h
+++ b/src/Storages/ObjectStorage/DataLakes/Iceberg/Snapshot.h
@@ -18,6 +18,8 @@ struct IcebergDataSnapshot
     std::optional<Int64> total_rows;
     std::optional<Int64> total_bytes;
     std::optional<Int64> total_position_delete_rows;
+    std::optional<String> partition_key;
+    std::optional<String> sorting_key;
 
     std::optional<Int64> getTotalRows() const
     {
diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp
index 3f20978d879e..676366c970e5 100644
--- a/src/Storages/System/StorageSystemTables.cpp
+++ b/src/Storages/System/StorageSystemTables.cpp
@@ -22,6 +22,8 @@
 #include
 #include
 #include
+#include <Storages/ObjectStorage/StorageObjectStorage.h>
+#include <Storages/ObjectStorage/DataLakes/IDataLakeMetadata.h>
 #include
 #include
 #include
@@ -605,18 +607,54 @@ class TablesBlockSource : public ISource
             ASTPtr expression_ptr;
             if (columns_mask[src_index++])
             {
-                if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST()))
-                    res_columns[res_index++]->insert(format({context, *expression_ptr}));
-                else
-                    res_columns[res_index++]->insertDefault();
+                bool inserted = false;
+                // Extract from specific DataLake metadata if suitable
+                if (auto * obj = dynamic_cast<StorageObjectStorage *>(table.get()))
+                {
+                    if (auto * dl_meta = obj->getExternalMetadata(context))
+                    {
+                        if (auto p = dl_meta->partitionKey(context); p.has_value())
+                        {
+                            res_columns[res_index++]->insert(*p);
+                            inserted = true;
+                        }
+                    }
+
+                }
+
+                if (!inserted)
+                {
+                    if (metadata_snapshot && (expression_ptr = metadata_snapshot->getPartitionKeyAST()))
+                        res_columns[res_index++]->insert(format({context, *expression_ptr}));
+                    else
+                        res_columns[res_index++]->insertDefault();
+                }
             }
 
             if (columns_mask[src_index++])
             {
-                if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast))
-                    res_columns[res_index++]->insert(format({context, *expression_ptr}));
-                else
-                    res_columns[res_index++]->insertDefault();
+                bool inserted = false;
+
+                // Extract from specific DataLake metadata if suitable
+                if (auto * obj = dynamic_cast<StorageObjectStorage *>(table.get()))
+                {
+                    if (auto * dl_meta = obj->getExternalMetadata(context))
+                    {
+                        if (auto p = dl_meta->sortingKey(context); p.has_value())
+                        {
+                            res_columns[res_index++]->insert(*p);
+                            inserted = true;
+                        }
+                    }
+                }
+
+                if (!inserted)
+                {
+                    if (metadata_snapshot && (expression_ptr = metadata_snapshot->getSortingKey().expression_list_ast))
+                        res_columns[res_index++]->insert(format({context, *expression_ptr}));
+                    else
+                        res_columns[res_index++]->insertDefault();
+                }
             }
 
             if (columns_mask[src_index++])

From 0a7353d8c5df8ae482119f9786237526b2c42017 Mon Sep 17 00:00:00 2001
From: Andrey Zvonov
Date: Thu, 19 Feb 2026 15:13:27 +0100
Subject: [PATCH 2/4] add test
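
Spark creates an Iceberg table with partition spec bucket(16, id), day(ts)
and write order id DESC NULLS LAST, hour(ts); the test then checks what
system.tables reports for it. The ClickHouse side of the check boils down
to this (a sketch; the actual test builds the table name dynamically and
goes through the integration helpers):

    SELECT partition_key, sorting_key
    FROM system.tables
    WHERE name = 'test_sys_tables_keys_...'
    FORMAT CSV

    -- expected, after lowercasing:
    -- "bucket(16, id), day(ts)","id desc, hour(ts) asc"
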
---
 .../test_system_iceberg_metadata.py           | 56 +++++++++++++++++--
 1 file changed, 52 insertions(+), 4 deletions(-)

diff --git a/tests/integration/test_storage_iceberg_with_spark/test_system_iceberg_metadata.py b/tests/integration/test_storage_iceberg_with_spark/test_system_iceberg_metadata.py
index 4ae18aacf2a3..d68802de625f 100644
--- a/tests/integration/test_storage_iceberg_with_spark/test_system_iceberg_metadata.py
+++ b/tests/integration/test_storage_iceberg_with_spark/test_system_iceberg_metadata.py
@@ -1,6 +1,8 @@
 from datetime import datetime, timedelta
 import pytest
 import json
+import uuid
+import time
 
 from helpers.iceberg_utils import (
     create_iceberg_table,
@@ -17,7 +19,7 @@ def __init__(self, not_pruned, partition_pruned, min_max_index_pruned):
 
     def __repr__(self):
         return "PrunedInfo(not_pruned={}, partition_pruned={}, min_max_index_pruned={})".format(self.not_pruned, self.partition_pruned, self.min_max_index_pruned)
-    
+
     def __eq__(self, other):
         return (self.not_pruned == other.not_pruned and
                 self.partition_pruned == other.partition_pruned and
@@ -113,7 +115,7 @@ def verify_result_dictionary(diction : dict, allowed_content_types : set):
                 number_of_missing_row_values += 1
         if partitioned_rows != not_deleted_files:
             raise ValueError("Partitioned rows are not consistent with not deleted files for file path: {}, partitioned rows: {}, not deleted files: {}".format(file_path, partitioned_rows, not_deleted_files))
-    
+
         # We have exactly one metadata file
         if number_of_missing_row_values != 1:
             raise ValueError("Not a one row value (corresponding to metadata file) is missing for file path: {}".format(file_path))
@@ -226,7 +228,7 @@ def execute_spark_query(query: str):
                 raise
 
     date_and_time_columns = get_date_and_time_columns(instance, query_id)
-    
+
     event_dates = date_and_time_columns['event_date']
     event_times = date_and_time_columns['event_time']
@@ -238,4 +240,50 @@ def execute_spark_query(query: str):
     for time_str in event_times:
         current_time = datetime.fromisoformat(time_str)
         assert current_time <= datetime.now(), "Event time is in the future. Event time: {}, current time: {}".format(current_time, datetime.now())
-    assert current_time >= (datetime.now() - timedelta(days=1)), "Event time is too old. Event time: {}, current time: {}".format(current_time, datetime.now())
\ No newline at end of file
+    assert current_time >= (datetime.now() - timedelta(days=1)), "Event time is too old. Event time: {}, current time: {}".format(current_time, datetime.now())
+
+
+@pytest.mark.parametrize("storage_type", ["s3"])
+def test_system_tables_partition_sorting_keys(started_cluster_iceberg_with_spark, storage_type):
+    instance = started_cluster_iceberg_with_spark.instances["node1"]
+    spark = started_cluster_iceberg_with_spark.spark_session
+
+    table_name = f"test_sys_tables_keys_{storage_type}_{uuid.uuid4().hex[:8]}"
+    fq_table = f"spark_catalog.default.{table_name}"
+
+    spark.sql(f"DROP TABLE IF EXISTS {fq_table}")
+    spark.sql(f"""
+        CREATE TABLE {fq_table} (
+            id INT,
+            ts TIMESTAMP,
+            payload STRING
+        )
+        USING iceberg
+        PARTITIONED BY (bucket(16, id), day(ts))
+        TBLPROPERTIES ('format-version' = '2')
+    """)
+    spark.sql(f"ALTER TABLE {fq_table} WRITE ORDERED BY (id DESC NULLS LAST, hour(ts))")
+    spark.sql(f"""
+        INSERT INTO {fq_table} VALUES
+        (1, timestamp'2024-01-01 10:00:00', 'a'),
+        (2, timestamp'2024-01-02 11:00:00', 'b'),
+        (NULL, timestamp'2024-01-03 12:00:00', 'c')
+    """)
+
+    time.sleep(2)
+    default_upload_directory(
+        started_cluster_iceberg_with_spark,
+        storage_type,
+        f"/iceberg_data/default/{table_name}/",
+        f"/iceberg_data/default/{table_name}/",
+    )
+
+    create_iceberg_table(storage_type, instance, table_name, started_cluster_iceberg_with_spark)
+
+    res = instance.query(f"""
+        SELECT partition_key, sorting_key
+        FROM system.tables
+        WHERE name = '{table_name}' FORMAT csv
+    """).strip().lower()
+
+    assert res == '"bucket(16, id), day(ts)","id desc, hour(ts) asc"'

From 3c1bd5dd6cb721d297e9b20be0002bff895b5e14 Mon Sep 17 00:00:00 2001
From: Andrey Zvonov
Date: Thu, 19 Feb 2026 15:42:27 +0100
Subject: [PATCH 3/4] fix display
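
Render partition_key and sorting_key from the table's current
metadata.json (default partition spec and default sort order) in
Iceberg/Spark display form, falling back to formatting the parsed
KeyDescription AST when no display string can be built. Transform
rendering, roughly (column names here are illustrative):

    identity      ->  id
    year, years   ->  year(ts)
    month, months ->  month(ts)
    day, date     ->  day(ts)
    hour, hours   ->  hour(ts)
    bucket[16]    ->  bucket(16, id)
    truncate[4]   ->  truncate(4, payload)

So a Spark definition PARTITIONED BY (bucket(16, id), day(ts)) shows up
verbatim, e.g. (hypothetical table name):

    SELECT partition_key FROM system.tables WHERE name = 't_ice';
    -- bucket(16, id), day(ts)
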
---
 .../DataLakes/Iceberg/IcebergMetadata.cpp     |  50 +++++++-
 .../DataLakes/Iceberg/IcebergMetadata.h       |   4 +-
 .../ObjectStorage/DataLakes/Iceberg/Utils.cpp | 118 ++++++++++++++++++
 .../ObjectStorage/DataLakes/Iceberg/Utils.h   |   6 +
 src/Storages/System/StorageSystemTables.cpp   |   6 +-
 5 files changed, 174 insertions(+), 10 deletions(-)

diff --git a/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp
index 6c045dd5fe9e..9a9ad6ef5302 100644
--- a/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp
+++ b/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp
@@ -40,6 +40,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -859,16 +860,36 @@ std::optional<UInt64> IcebergMetadata::totalBytes(ContextPtr local_context) cons
     return result;
 }
 
-std::optional<String> IcebergMetadata::partitionKey(ContextPtr) const
+std::optional<String> IcebergMetadata::partitionKey(ContextPtr context) const
 {
-    SharedLockGuard lock(mutex);
-    return relevant_snapshot->partition_key;
+    auto [actual_data_snapshot, actual_table_state_snapshot] = getRelevantState(context);
+    if (!actual_data_snapshot)
+        return std::nullopt;
+    return getPartitionKey(context, actual_table_state_snapshot);
 }
 
-std::optional<String> IcebergMetadata::sortingKey(ContextPtr) const
+std::optional<String> IcebergMetadata::sortingKey(ContextPtr context) const
 {
-    SharedLockGuard lock(mutex);
-    return relevant_snapshot->sorting_key;
+    auto [actual_data_snapshot, actual_table_state_snapshot] = getRelevantState(context);
+    if (!actual_data_snapshot)
+        return std::nullopt;
+    auto metadata_object = getMetadataJSONObject(
+        actual_table_state_snapshot.metadata_file_path,
+        object_storage,
+        persistent_components.metadata_cache,
+        context,
+        log,
+        persistent_components.metadata_compression_method,
+        persistent_components.table_uuid);
+    auto [schema, current_schema_id] = parseTableSchemaV2Method(metadata_object);
+    const auto & ch_schema = *persistent_components.schema_processor->getClickhouseTableSchemaById(current_schema_id);
+    auto display = getSortingKeyDisplayStringFromMetadata(metadata_object, ch_schema);
+    if (display)
+        return display;
+    auto key = getSortingKey(context, actual_table_state_snapshot);
+    if (!key.expression_list_ast)
+        return std::nullopt;
+    return format({context, *key.expression_list_ast});
 }
 
 
@@ -1102,6 +1123,23 @@ ColumnMapperPtr IcebergMetadata::getColumnMapperForCurrentSchema(StorageMetadata
     return persistent_components.schema_processor->getColumnMapperById(iceberg_table_state->schema_id);
 }
 
+std::optional<String> IcebergMetadata::getPartitionKey(ContextPtr local_context, TableStateSnapshot actual_table_state_snapshot) const
+{
+    auto metadata_object = getMetadataJSONObject(
+        actual_table_state_snapshot.metadata_file_path,
+        object_storage,
+        persistent_components.metadata_cache,
+        local_context,
+        log,
+        persistent_components.metadata_compression_method,
+        persistent_components.table_uuid);
+    auto [schema, current_schema_id] = parseTableSchemaV2Method(metadata_object);
+    return getPartitionKeyStringFromMetadata(
+        metadata_object,
+        *persistent_components.schema_processor->getClickhouseTableSchemaById(current_schema_id),
+        local_context);
+}
+
 KeyDescription IcebergMetadata::getSortingKey(ContextPtr local_context, TableStateSnapshot actual_table_state_snapshot) const
 {
     auto metadata_object = getMetadataJSONObject(
diff --git a/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.h b/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.h
index a67e1262bf49..a75da4b54897 100644
--- a/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.h
+++ b/src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.h
@@ -148,12 +148,14 @@ class IcebergMetadata : public IDataLakeMetadata
     getRelevantDataSnapshotFromTableStateSnapshot(Iceberg::TableStateSnapshot table_state_snapshot, ContextPtr local_context) const;
 
     std::pair getRelevantState(const ContextPtr & context) const;
 
+    std::optional<String> getPartitionKey(ContextPtr local_context, Iceberg::TableStateSnapshot actual_table_state_snapshot) const;
+    KeyDescription getSortingKey(ContextPtr local_context, Iceberg::TableStateSnapshot actual_table_state_snapshot) const;
+
     LoggerPtr log;
     const ObjectStoragePtr object_storage;
     DB::Iceberg::PersistentTableComponents persistent_components;
     const DataLakeStorageSettings & data_lake_settings;
     const String write_format;
-    KeyDescription getSortingKey(ContextPtr local_context, Iceberg::TableStateSnapshot actual_table_state_snapshot) const;
 };
 
 }
diff --git a/src/Storages/ObjectStorage/DataLakes/Iceberg/Utils.cpp b/src/Storages/ObjectStorage/DataLakes/Iceberg/Utils.cpp
index 2a1dff4caa26..ac192d274703 100644
--- a/src/Storages/ObjectStorage/DataLakes/Iceberg/Utils.cpp
+++ b/src/Storages/ObjectStorage/DataLakes/Iceberg/Utils.cpp
@@ -1166,6 +1166,124 @@ KeyDescription getSortingKeyDescriptionFromMetadata(Poco::JSON::Object::Ptr meta
     return KeyDescription::parse(order_by_str, column_description, local_context, true);
 }
 
+/// Format one partition field for display in Iceberg/Spark style, e.g. "day(ts)" or "bucket(16, id)".
+static String formatPartitionFieldDisplay(const String & iceberg_transform_name, const String & column_name)
+{
+    std::string name = Poco::toLower(iceberg_transform_name);
+    if (name == "identity")
+        return column_name;
+    if (name == "year" || name == "years")
+        return "year(" + column_name + ")";
+    if (name == "month" || name == "months")
+        return "month(" + column_name + ")";
+    if (name == "day" || name == "date" || name == "days" || name == "dates")
+        return "day(" + column_name + ")";
+    if (name == "hour" || name == "hours")
+        return "hour(" + column_name + ")";
+    if (name.starts_with("truncate") && name.back() == ']')
+    {
+        auto p = name.find('[');
+        if (p != std::string::npos)
+            return "truncate(" + name.substr(p + 1, name.size() - p - 2) + ", " + column_name + ")";
+    }
+    if (name.starts_with("bucket") && name.back() == ']')
+    {
+        auto p = name.find('[');
+        if (p != std::string::npos)
+            return "bucket(" + name.substr(p + 1, name.size() - p - 2) + ", " + column_name + ")";
+    }
+    return column_name;
+}
+
+std::optional<String> getPartitionKeyStringFromMetadata(Poco::JSON::Object::Ptr metadata_object, const NamesAndTypesList & /* ch_schema */, ContextPtr /* local_context */)
+{
+    if (!metadata_object->has(f_partition_specs) || !metadata_object->has(f_default_spec_id))
+        return std::nullopt;
+    auto partition_spec_id = metadata_object->getValue<Int32>(f_default_spec_id);
+    Poco::JSON::Array::Ptr partition_specs = metadata_object->getArray(f_partition_specs);
+    std::unordered_map<Int64, String> source_id_to_column_name;
+    auto [schema, current_schema_id] = parseTableSchemaV2Method(metadata_object);
+    auto mapper = createColumnMapper(schema)->getStorageColumnEncoding();
+    for (const auto & [col_name, source_id] : mapper)
+        source_id_to_column_name[source_id] = col_name;
+
+    Poco::JSON::Object::Ptr partition_spec;
+    for (size_t i = 0; i < partition_specs->size(); ++i)
+    {
+        auto spec = partition_specs->getObject(static_cast<UInt32>(i));
+        if (spec->getValue<Int32>(f_spec_id) == partition_spec_id)
+        {
+            partition_spec = spec;
+            break;
+        }
+    }
+    if (!partition_spec || !partition_spec->has(f_fields))
+        return std::nullopt;
+    auto fields = partition_spec->getArray(f_fields);
+    if (fields->size() == 0)
+        return std::nullopt;
+
+    std::vector<String> part_exprs;
+    for (UInt32 i = 0; i < fields->size(); ++i)
+    {
+        auto field = fields->getObject(i);
+        auto source_id = field->getValue<Int64>(f_source_id);
+        auto it = source_id_to_column_name.find(source_id);
+        if (it == source_id_to_column_name.end())
+            return std::nullopt;
+        String column_name = it->second;
+        auto iceberg_transform_name = field->getValue<String>(f_transform);
+        part_exprs.push_back(formatPartitionFieldDisplay(iceberg_transform_name, column_name));
+    }
+    String result;
+    for (size_t i = 0; i < part_exprs.size(); ++i)
+    {
+        if (i != 0)
+            result += ", ";
+        result += part_exprs[i];
+    }
+    return result;
+}
+
+std::optional<String> getSortingKeyDisplayStringFromMetadata(Poco::JSON::Object::Ptr metadata_object, const NamesAndTypesList & /* ch_schema */)
+{
+    if (!metadata_object->has(f_sort_orders) || !metadata_object->has(f_default_sort_order_id))
+        return std::nullopt;
+    auto sort_order_id = metadata_object->getValue<Int32>(f_default_sort_order_id);
+    Poco::JSON::Array::Ptr sort_orders = metadata_object->getArray(f_sort_orders);
+    std::unordered_map<Int64, String> source_id_to_column_name;
+    auto [schema, current_schema_id] = parseTableSchemaV2Method(metadata_object);
+    auto mapper = createColumnMapper(schema)->getStorageColumnEncoding();
+    for (const auto & [col_name, source_id] : mapper)
+        source_id_to_column_name[source_id] = col_name;
+
+    for (UInt32 i = 0; i < sort_orders->size(); ++i)
+    {
+        auto sort_order = sort_orders->getObject(i);
+        if (sort_order->getValue<Int32>(f_order_id) != sort_order_id)
+            continue;
+        auto sort_fields = sort_order->getArray(f_fields);
+        String result;
+        for (UInt32 j = 0; j < sort_fields->size(); ++j)
+        {
+            auto field = sort_fields->getObject(j);
+            auto source_id = field->getValue<Int64>(f_source_id);
+            auto it = source_id_to_column_name.find(source_id);
+            if (it == source_id_to_column_name.end())
+                return std::nullopt;
+            String column_name = it->second;
+            String direction = field->getValue<String>(f_direction) == "asc" ? " asc" : " desc";
+            auto iceberg_transform_name = field->getValue<String>(f_transform);
+            String expr = formatPartitionFieldDisplay(iceberg_transform_name, column_name);
+            if (!result.empty())
+                result += ", ";
+            result += expr + direction;
+        }
+        return result.empty() ? std::nullopt : std::optional<String>(result);
+    }
+    return std::nullopt;
+}
+
 DataTypePtr getFunctionResultType(const String & iceberg_transform_name, DataTypePtr source_type)
 {
     if (iceberg_transform_name.starts_with("identity") || iceberg_transform_name.starts_with("truncate"))
diff --git a/src/Storages/ObjectStorage/DataLakes/Iceberg/Utils.h b/src/Storages/ObjectStorage/DataLakes/Iceberg/Utils.h
index 484e01cfe869..c9e6ad88b292 100644
--- a/src/Storages/ObjectStorage/DataLakes/Iceberg/Utils.h
+++ b/src/Storages/ObjectStorage/DataLakes/Iceberg/Utils.h
@@ -1,5 +1,6 @@
 #pragma once
 
+#include <optional>
 #include
 #include
 #include
@@ -108,6 +109,11 @@ DataTypePtr getFunctionResultType(const String & iceberg_transform_name, DataTyp
 KeyDescription getSortingKeyDescriptionFromMetadata(
     Poco::JSON::Object::Ptr metadata_object, const NamesAndTypesList & ch_schema, ContextPtr local_context);
 
+/// Returns Iceberg/Spark-style display string for sort order, e.g. "id desc, hour(ts) asc".
+std::optional<String> getSortingKeyDisplayStringFromMetadata(
+    Poco::JSON::Object::Ptr metadata_object, const NamesAndTypesList & ch_schema);
+std::optional<String> getPartitionKeyStringFromMetadata(
+    Poco::JSON::Object::Ptr metadata_object, const NamesAndTypesList & ch_schema, ContextPtr local_context);
 void sortBlockByKeyDescription(Block & block, const KeyDescription & sort_description, ContextPtr context);
 
 }
diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp
index 676366c970e5..1020e9241903 100644
--- a/src/Storages/System/StorageSystemTables.cpp
+++ b/src/Storages/System/StorageSystemTables.cpp
@@ -22,7 +22,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -609,7 +609,7 @@ class TablesBlockSource : public ISource
             {
                 bool inserted = false;
                 // Extract from specific DataLake metadata if suitable
-                if (auto * obj = dynamic_cast(table.get()))
+                if (auto * obj = dynamic_cast(table.get()))
                 {
                     if (auto * dl_meta = obj->getExternalMetadata(context))
                     {
@@ -636,7 +636,7 @@ class TablesBlockSource : public ISource
                 bool inserted = false;
 
                 // Extract from specific DataLake metadata if suitable
-                if (auto * obj = dynamic_cast(table.get()))
+                if (auto * obj = dynamic_cast(table.get()))
                 {
                     if (auto * dl_meta = obj->getExternalMetadata(context))
                     {

From fa81315a1b092878cb084b190bd2afcca8609b54 Mon Sep 17 00:00:00 2001
From: Andrey Zvonov
Date: Thu, 19 Feb 2026 15:47:40 +0100
Subject: [PATCH 4/4] fix possible segfault
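
Fetching data lake metadata for system.tables can throw (for example,
when the metadata file has been removed from object storage or a catalog
request fails), which could take down the whole query. Wrap the lookup in
try/catch, log the exception, and fall back to the regular
metadata_snapshot path, so that a broad listing such as the illustrative
query below keeps working even if one lake table is broken:

    SELECT name, partition_key, sorting_key
    FROM system.tables
    WHERE database = 'default';
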
---
 src/Storages/System/StorageSystemTables.cpp | 43 ++++++++++++++-------
 1 file changed, 30 insertions(+), 13 deletions(-)

diff --git a/src/Storages/System/StorageSystemTables.cpp b/src/Storages/System/StorageSystemTables.cpp
index 1020e9241903..6959e790e465 100644
--- a/src/Storages/System/StorageSystemTables.cpp
+++ b/src/Storages/System/StorageSystemTables.cpp
@@ -608,18 +608,27 @@ class TablesBlockSource : public ISource
             if (columns_mask[src_index++])
             {
                 bool inserted = false;
-                // Extract from specific DataLake metadata if suitable
-                if (auto * obj = dynamic_cast(table.get()))
+
+                try
                 {
-                    if (auto * dl_meta = obj->getExternalMetadata(context))
+                    // Extract from specific DataLake metadata if suitable
+                    if (auto * obj = dynamic_cast(table.get()))
                     {
-                        if (auto p = dl_meta->partitionKey(context); p.has_value())
+                        if (auto * dl_meta = obj->getExternalMetadata(context))
                         {
-                            res_columns[res_index++]->insert(*p);
-                            inserted = true;
+                            if (auto p = dl_meta->partitionKey(context); p.has_value())
+                            {
+                                res_columns[res_index++]->insert(*p);
+                                inserted = true;
+                            }
                         }
-                    }
+                    }
+                }
+                catch (const Exception &)
+                {
+                    /// Failed to get info. It's not critical, just log it.
+                    tryLogCurrentException("StorageSystemTables");
                 }
 
                 if (!inserted)
@@ -635,18 +644,26 @@ class TablesBlockSource : public ISource
             {
                 bool inserted = false;
 
-                // Extract from specific DataLake metadata if suitable
-                if (auto * obj = dynamic_cast(table.get()))
+                try
                 {
-                    if (auto * dl_meta = obj->getExternalMetadata(context))
+                    // Extract from specific DataLake metadata if suitable
+                    if (auto * obj = dynamic_cast(table.get()))
                     {
-                        if (auto p = dl_meta->sortingKey(context); p.has_value())
+                        if (auto * dl_meta = obj->getExternalMetadata(context))
                         {
-                            res_columns[res_index++]->insert(*p);
-                            inserted = true;
+                            if (auto p = dl_meta->sortingKey(context); p.has_value())
+                            {
+                                res_columns[res_index++]->insert(*p);
+                                inserted = true;
+                            }
                         }
                     }
+                }
+                catch (const Exception &)
+                {
+                    /// Failed to get info. It's not critical, just log it.
+                    tryLogCurrentException("StorageSystemTables");
+                }
 
                 if (!inserted)
                 {