
feat: Add Operator Cache for databend #18196


Merged: 6 commits, Jun 20, 2025

Changes from all commits
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -336,6 +336,7 @@ http = "1"
humantime = "2.1.0"
hyper = "1"
hyper-util = { version = "0.1.9", features = ["client", "client-legacy", "tokio", "service"] }
lru = "0.12"

## in branch dev
iceberg = { version = "0.4.0", git = "https://github.com/databendlabs/iceberg-rust", rev = "d5cca1c15f240f3cb04e57569bce648933b1c79b", features = [
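For context, the `lru` crate supplies the fixed-capacity map the new operator cache is built on. A minimal sketch of the behavior the cache inherits from it, against the 0.12 API: capacity is a `NonZeroUsize`, `get` promotes an entry to most recently used, and `put` evicts the least recently used entry once the cache is full.

```rust
use std::num::NonZeroUsize;

use lru::LruCache;

fn main() {
    // Capacity must be non-zero, mirroring how OperatorCache constructs its map.
    let mut cache: LruCache<String, u32> = LruCache::new(NonZeroUsize::new(2).unwrap());
    cache.put("a".to_string(), 1);
    cache.put("b".to_string(), 2);
    cache.get("a"); // promotes "a" to most recently used
    cache.put("c".to_string(), 3); // evicts "b", the least recently used entry
    assert!(cache.get("b").is_none());
    assert_eq!(cache.len(), 2);
}
```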
1 change: 1 addition & 0 deletions src/common/storage/Cargo.toml
@@ -28,6 +28,7 @@ futures = { workspace = true }
http = { workspace = true }
iceberg = { workspace = true }
log = { workspace = true }
+lru = { workspace = true }
opendal = { workspace = true }
parquet = { workspace = true }
prometheus-client = { workspace = true }
2 changes: 2 additions & 0 deletions src/common/storage/src/lib.rs
@@ -45,6 +45,8 @@ pub use operator::init_operator;
pub use operator::DataOperator;
pub use operator::OperatorRegistry;

+mod operator_cache;
+
pub mod metrics;
pub use crate::metrics::StorageMetrics;
pub use crate::metrics::StorageMetricsLayer;
10 changes: 10 additions & 0 deletions src/common/storage/src/operator.rs
@@ -59,6 +59,7 @@ use opendal::Operator;

use crate::http_client::get_storage_http_client;
use crate::metrics_layer::METRICS_LAYER;
+use crate::operator_cache::get_operator_cache;
use crate::runtime_layer::RuntimeLayer;
use crate::StorageConfig;
use crate::StorageHttpClient;
@@ -68,6 +69,15 @@ static METRIC_OPENDAL_RETRIES_COUNT: LazyLock<FamilyCounter<Vec<(&'static str, S

/// init_operator will init an opendal operator based on storage config.
pub fn init_operator(cfg: &StorageParams) -> Result<Operator> {
+    let cache = get_operator_cache();
+    cache
+        .get_or_create(cfg)
+        .map_err(|e| Error::other(anyhow!("Failed to get or create operator: {}", e)))
+}
+
+/// init_operator_uncached will init an opendal operator without caching.
+/// This function creates a new operator every time it's called.
+pub(crate) fn init_operator_uncached(cfg: &StorageParams) -> Result<Operator> {
    let op = match &cfg {
        StorageParams::Azblob(cfg) => {
            build_operator(init_azblob_operator(cfg)?, cfg.network_config.as_ref())?
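With this change, `init_operator` keeps its public signature but delegates to the process-wide cache: repeated calls with equal `StorageParams` return clones of one cached `Operator` instead of rebuilding the backend client (and re-running credential or token setup) each time. Cloning is cheap because opendal operators are reference-counted internally. A hedged sketch of the resulting call pattern, where `fs_params` is a hypothetical helper and the `Result` alias is assumed to be `std::io::Result`, as the `Error::other` mapping suggests:

```rust
use databend_common_meta_app::storage::StorageFsConfig;
use databend_common_meta_app::storage::StorageParams;
use databend_common_storage::init_operator;

// Hypothetical helper: equal StorageParams values map to the same cache entry.
fn fs_params(root: &str) -> StorageParams {
    StorageParams::Fs(StorageFsConfig {
        root: root.to_string(),
    })
}

fn demo() -> std::io::Result<()> {
    let op1 = init_operator(&fs_params("/tmp/databend"))?; // cache miss: builds the operator
    let op2 = init_operator(&fs_params("/tmp/databend"))?; // cache hit: clone of the cached one
    let _ = (op1, op2);
    Ok(())
}
```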
105 changes: 105 additions & 0 deletions src/common/storage/src/operator_cache.rs
@@ -0,0 +1,105 @@
// Copyright 2021 Datafuse Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::num::NonZeroUsize;
use std::sync::Arc;
use std::sync::LazyLock;
use std::sync::Mutex;

use databend_common_base::runtime::metrics::register_counter;
use databend_common_base::runtime::metrics::register_gauge;
use databend_common_base::runtime::metrics::Counter;
use databend_common_base::runtime::metrics::Gauge;
use databend_common_exception::Result as DatabendResult;
use databend_common_meta_app::storage::StorageParams;
use log::debug;
use log::info;
use lru::LruCache;
use opendal::Operator;

use crate::operator::init_operator_uncached;

// Internal metrics for monitoring cache effectiveness
static CACHE_HIT_COUNT: LazyLock<Counter> =
    LazyLock::new(|| register_counter("storage_operator_cache_hit_total"));
static CACHE_MISS_COUNT: LazyLock<Counter> =
    LazyLock::new(|| register_counter("storage_operator_cache_miss_total"));
static CACHE_SIZE: LazyLock<Gauge> =
    LazyLock::new(|| register_gauge("storage_operator_cache_size"));

const DEFAULT_CACHE_SIZE: usize = 1024;

/// OperatorCache provides caching for storage operators to avoid
/// frequent recreation and token refresh operations.
pub(crate) struct OperatorCache {
    cache: Arc<Mutex<LruCache<StorageParams, Operator>>>,
}

impl OperatorCache {
    pub(crate) fn new() -> Self {
        let cache_size = std::env::var("DATABEND_OPERATOR_CACHE_SIZE")
            .ok()
            .and_then(|v| v.parse::<usize>().ok())
            .unwrap_or(DEFAULT_CACHE_SIZE);
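        // Unparsable values fall back silently to the default; a value of 0
        // panics below when converted to NonZeroUsize.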

        info!("Initializing operator cache with size: {}", cache_size);

        Self {
            cache: Arc::new(Mutex::new(LruCache::new(
                NonZeroUsize::new(cache_size).expect("cache size must be greater than 0"),
            ))),
        }
    }

    /// Get or create an operator from cache
    pub(crate) fn get_or_create(&self, params: &StorageParams) -> DatabendResult<Operator> {
        // Check if we have a cached operator
        {
            let mut cache = self.cache.lock().unwrap();
            if let Some(operator) = cache.get(params) {
                debug!("Operator cache hit for params: {:?}", params);
                CACHE_HIT_COUNT.inc();
                return Ok(operator.clone());
            }
        }
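        // The lock is released at the end of the scope above, so building a new
        // operator below never blocks concurrent lookups. Two threads that miss
        // on the same params may both build one; the later `put` overwrites the
        // earlier entry, which is harmless because operators created from
        // identical params are interchangeable.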

        // Cache miss, create new operator
        debug!("Operator cache miss for params: {:?}", params);
        CACHE_MISS_COUNT.inc();

        let operator = init_operator_uncached(params)?;

        // Insert into cache
        {
            let mut cache = self.cache.lock().unwrap();
            cache.put(params.clone(), operator.clone());
            CACHE_SIZE.set(cache.len() as i64);
        }

        Ok(operator)
    }
}

impl Default for OperatorCache {
    fn default() -> Self {
        Self::new()
    }
}

/// Global operator cache instance
pub(crate) fn get_operator_cache() -> Arc<OperatorCache> {
    static INSTANCE: LazyLock<Arc<OperatorCache>> =
        LazyLock::new(|| Arc::new(OperatorCache::new()));
    INSTANCE.clone()
}
32 changes: 16 additions & 16 deletions src/meta/app-storage/src/storage_params.rs
@@ -26,7 +26,7 @@ use serde::Serialize;
const DEFAULT_DETECT_REGION_TIMEOUT_SEC: u64 = 10;

/// Storage params which contains the detailed storage info.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum StorageParams {
    Azblob(StorageAzblobConfig),
@@ -292,7 +292,7 @@ impl Display for StorageParams {
}

/// Config for storage backend azblob.
-#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageAzblobConfig {
    pub endpoint_url: String,
    pub container: String,
@@ -315,7 +315,7 @@ impl Debug for StorageAzblobConfig {
}

/// Config for storage backend fs.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageFsConfig {
    pub root: String,
}
@@ -331,7 +331,7 @@ impl Default for StorageFsConfig {
pub const STORAGE_FTP_DEFAULT_ENDPOINT: &str = "ftps://127.0.0.1";

/// Config for FTP and FTPS data source
-#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageFtpConfig {
    pub endpoint: String,
    pub root: String,
@@ -366,7 +366,7 @@ impl Debug for StorageFtpConfig {
pub static STORAGE_GCS_DEFAULT_ENDPOINT: &str = "https://storage.googleapis.com";

/// Config for storage backend GCS.
-#[derive(Clone, PartialEq, Eq, Deserialize, Serialize)]
+#[derive(Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
pub struct StorageGcsConfig {
    pub endpoint_url: String,
    pub bucket: String,
@@ -405,7 +405,7 @@ impl Debug for StorageGcsConfig {
/// Ideally, we should export this config only when hdfs feature enabled.
/// But export this struct without hdfs feature is safe and no harm. So we
/// export it to make crates' lives that depend on us easier.
-#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageHdfsConfig {
    pub name_node: String,
    pub root: String,
@@ -415,7 +415,7 @@ pub struct StorageHdfsConfig {
pub static STORAGE_S3_DEFAULT_ENDPOINT: &str = "https://s3.amazonaws.com";

/// Config for storage backend s3.
-#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageS3Config {
    pub endpoint_url: String,
    pub region: String,
@@ -489,7 +489,7 @@ impl Debug for StorageS3Config {
}

/// Config for storage backend http.
-#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageHttpConfig {
    pub endpoint_url: String,
    pub paths: Vec<String>,
@@ -499,15 +499,15 @@ pub struct StorageHttpConfig {
pub const STORAGE_IPFS_DEFAULT_ENDPOINT: &str = "https://ipfs.io";

/// Config for IPFS storage backend
-#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageIpfsConfig {
    pub endpoint_url: String,
    pub root: String,
    pub network_config: Option<StorageNetworkParams>,
}

/// Config for storage backend obs.
-#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageObsConfig {
    pub endpoint_url: String,
    pub bucket: String,
@@ -533,7 +533,7 @@ impl Debug for StorageObsConfig {
}

/// config for Aliyun Object Storage Service
-#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageOssConfig {
    pub endpoint_url: String,
    pub presign_endpoint_url: String,
@@ -577,7 +577,7 @@ impl Debug for StorageOssConfig {
}

/// config for Moka Object Storage Service
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageMokaConfig {
    pub max_capacity: u64,
    pub time_to_live: i64,
@@ -599,7 +599,7 @@ impl Default for StorageMokaConfig {
}

/// config for WebHDFS Storage Service
-#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageWebhdfsConfig {
    pub endpoint_url: String,
    pub root: String,
@@ -624,7 +624,7 @@ impl Debug for StorageWebhdfsConfig {
}
}

-#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageCosConfig {
    pub secret_id: String,
    pub secret_key: String,
@@ -648,7 +648,7 @@ impl Debug for StorageCosConfig {
}
}

-#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageHuggingfaceConfig {
    /// repo_id for huggingface repo, looks like `opendal/huggingface-testdata`
    pub repo_id: String,
@@ -698,7 +698,7 @@ pub fn mask_string(s: &str, unmask_len: usize) -> String {
}
}

-#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StorageNetworkParams {
    pub retry_timeout: u64,
    pub retry_io_timeout: u64,
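The mechanical `Hash` derives above are what the cache hinges on: `LruCache<StorageParams, Operator>` requires its key type to implement `Hash + Eq`, and an enum can only derive `Hash` when every payload type derives it too, hence the sweep across all backend config structs. A reduced sketch of the constraint, with stand-in types rather than the real ones:

```rust
use std::num::NonZeroUsize;

use lru::LruCache;

// Stand-ins for StorageParams and a backend config: the enum can only derive
// Hash because its payload type derives it as well.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct FsConfig {
    root: String,
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
enum Params {
    Fs(FsConfig),
}

fn main() {
    // K = Params satisfies the Hash + Eq bounds LruCache places on keys.
    let mut cache: LruCache<Params, &'static str> =
        LruCache::new(NonZeroUsize::new(8).unwrap());
    let key = Params::Fs(FsConfig { root: "/tmp".to_string() });
    cache.put(key.clone(), "operator");
    assert_eq!(cache.get(&key), Some(&"operator"));
}
```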