From d562abd6fd84e4167b0fec3c0e94ca3ef945b9bf Mon Sep 17 00:00:00 2001 From: chejinge <945997690@qq.com> Date: Mon, 17 Jun 2024 09:18:14 +0800 Subject: [PATCH 01/11] fix:ttl will deafult 0 when keys have ttl (#2730) * fix:ttl will deafult 0 when keys have ttl --------- Co-authored-by: chejinge --- src/pika_kv.cc | 2 +- src/storage/include/storage/storage.h | 1 + src/storage/src/redis.h | 1 + src/storage/src/redis_strings.cc | 66 +++++++++++++++++++-------- src/storage/src/storage.cc | 7 ++- 5 files changed, 57 insertions(+), 20 deletions(-) diff --git a/src/pika_kv.cc b/src/pika_kv.cc index bba495a967..2d0e5c8744 100644 --- a/src/pika_kv.cc +++ b/src/pika_kv.cc @@ -533,7 +533,7 @@ void MgetCmd::Do() { cache_miss_keys_ = keys_; } db_value_status_array_.clear(); - s_ = db_->storage()->MGet(cache_miss_keys_, &db_value_status_array_); + s_ = db_->storage()->MGetWithTTL(cache_miss_keys_, &db_value_status_array_); if (!s_.ok()) { if (s_.IsInvalidArgument()) { res_.SetRes(CmdRes::kMultiKey); diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 779e52cc3e..0b520f5800 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -1114,6 +1114,7 @@ class Storage { // For scan keys in data base std::atomic scan_keynum_exit_ = {false}; + Status MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl); }; } // namespace storage diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index 84f95b67e5..ad8906ba0c 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -156,6 +156,7 @@ class Redis { Status Get(const Slice& key, std::string* value); Status MGet(const Slice& key, std::string* value); Status GetWithTTL(const Slice& key, std::string* value, int64_t* ttl); + Status MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl); Status GetBit(const Slice& key, int64_t offset, int32_t* ret); Status Getrange(const Slice& key, int64_t start_offset, int64_t 
end_offset, std::string* ret); Status GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc index 471320ae55..cab41de9aa 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -357,38 +357,68 @@ Status Redis::MGet(const Slice& key, std::string* value) { return s; } +void ClearValueAndSetTTL(std::string* value, int64_t* ttl, int64_t ttl_value) { + value->clear(); + *ttl = ttl_value; +} + +int64_t CalculateTTL(int64_t expiry_time) { + int64_t current_time; + rocksdb::Env::Default()->GetCurrentTime(¤t_time); + return expiry_time - current_time >= 0 ? expiry_time - current_time : -2; +} + +Status HandleParsedStringsValue(ParsedStringsValue& parsed_strings_value, std::string* value, int64_t* ttl) { + if (parsed_strings_value.IsStale()) { + ClearValueAndSetTTL(value, ttl, -2); + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + int64_t expiry_time = parsed_strings_value.Etime(); + *ttl = (expiry_time == 0) ? 
-1 : CalculateTTL(expiry_time); + } + return Status::OK(); +} + Status Redis::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { value->clear(); BaseKey base_key(key); Status s = db_->Get(default_read_options_, base_key.Encode(), value); std::string meta_value = *value; + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + " get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } + if (s.ok()) { ParsedStringsValue parsed_strings_value(value); - if (parsed_strings_value.IsStale()) { - value->clear(); - *ttl = -2; - return Status::NotFound("Stale"); - } else { - parsed_strings_value.StripSuffix(); - *ttl = parsed_strings_value.Etime(); - if (*ttl == 0) { - *ttl = -1; - } else { - int64_t curtime; - rocksdb::Env::Default()->GetCurrentTime(&curtime); - *ttl = *ttl - curtime >= 0 ? 
*ttl - curtime : -2; - } - } + return HandleParsedStringsValue(parsed_strings_value, value, ttl); } else if (s.IsNotFound()) { - value->clear(); - *ttl = -2; + ClearValueAndSetTTL(value, ttl, -2); + } + + return s; +} + +Status Redis::MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { + value->clear(); + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); + std::string meta_value = *value; + + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + s = Status::NotFound(); + } + + if (s.ok()) { + ParsedStringsValue parsed_strings_value(value); + return HandleParsedStringsValue(parsed_strings_value, value, ttl); + } else if (s.IsNotFound()) { + ClearValueAndSetTTL(value, ttl, -2); } return s; diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index eff2a82176..ff4378367d 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -155,6 +155,11 @@ Status Storage::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { return inst->GetWithTTL(key, value, ttl); } +Status Storage::MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { + auto& inst = GetDBInstance(key); + return inst->MGetWithTTL(key, value, ttl); +} + Status Storage::GetSet(const Slice& key, const Slice& value, std::string* old_value) { auto& inst = GetDBInstance(key); return inst->GetSet(key, value, old_value); @@ -208,7 +213,7 @@ Status Storage::MGetWithTTL(const std::vector& keys, std::vectorGetWithTTL(key, &value, &ttl); + s = inst->MGetWithTTL(key, &value, &ttl); if (s.ok()) { vss->push_back({value, Status::OK(), ttl}); } else if (s.IsNotFound()) { From 3bcccd0898a9925d75bd3e8233ea42ed2267fd63 Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Mon, 17 Jun 2024 20:35:31 +0800 Subject: [PATCH 02/11] fix: pksetexat should update cache (#2736) * fix: pksetexat should update cache * fix: handle error when expire < 0 --- 
include/pika_kv.h | 2 ++ src/pika_command.cc | 2 +- src/pika_kv.cc | 15 +++++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/include/pika_kv.h b/include/pika_kv.h index 277a27422f..204fdb1ff2 100644 --- a/include/pika_kv.h +++ b/include/pika_kv.h @@ -792,6 +792,8 @@ class PKSetexAtCmd : public Cmd { return res; } void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; void Split(const HintKeys& hint_keys) override {}; void Merge() override {}; Cmd* Clone() override { return new PKSetexAtCmd(*this); } diff --git a/src/pika_command.cc b/src/pika_command.cc index 81c23c2533..a40cb77f35 100644 --- a/src/pika_command.cc +++ b/src/pika_command.cc @@ -370,7 +370,7 @@ void InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameScanx, std::move(scanxptr))); ////PKSetexAtCmd std::unique_ptr pksetexatptr = std::make_unique( - kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); + kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNamePKSetexAt, std::move(pksetexatptr))); ////PKScanRange std::unique_ptr pkscanrangeptr = std::make_unique( diff --git a/src/pika_kv.cc b/src/pika_kv.cc index 2d0e5c8744..ccc7ea1cfa 100644 --- a/src/pika_kv.cc +++ b/src/pika_kv.cc @@ -1704,6 +1704,21 @@ void PKSetexAtCmd::Do() { } } +void PKSetexAtCmd::DoThroughDB() { + Do(); +} + +void PKSetexAtCmd::DoUpdateCache() { + if (s_.ok()) { + auto expire = time_stamp_ - static_cast(std::time(nullptr)); + if (expire <= 0) [[unlikely]] { + db_->cache()->Del({key_}); + return; + } + db_->cache()->Setxx(key_, value_, expire); + } +} + void PKScanRangeCmd::DoInitial() { if (!CheckArg(argv_.size())) { res_.SetRes(CmdRes::kWrongNum, kCmdNamePKScanRange); From f95f867c6b62cd23e25750771b2408923f6bd058 Mon Sep 17 00:00:00 2001 From: cheniujh <41671101+cheniujh@users.noreply.github.com> Date: Tue, 18 Jun 2024 14:48:26 +0800 Subject: 
[PATCH 03/11] fix: Revised RocksDB-Related Parameters in Pika (#2728) * 1 set the default value of rate-limiter to 1024GB and make it dynamically changeable 2 allow user to config max-background-flushes and max-background-compactions to -1 while max-background-jobs is given 3 add conf item delayed-write-rate and make it dynamically changeable 4 add conf item max-compaction-bytes and make it dynamically changeable 4 revised the comment of rate-limiter-auto-tuned in pika.conf * fix bugs --------- Co-authored-by: cjh <1271435567@qq.com> --- conf/pika.conf | 51 +++++++++++++++++++++++++++++++++++---------- include/pika_conf.h | 36 +++++++++++++++++++++++++++++--- src/pika_admin.cc | 51 ++++++++++++++++++++++++++++++++++++++++++++- src/pika_conf.cc | 26 +++++++++++++++++++---- src/pika_server.cc | 4 ++-- 5 files changed, 147 insertions(+), 21 deletions(-) diff --git a/conf/pika.conf b/conf/pika.conf index 1a7b815885..1396caf5e5 100644 --- a/conf/pika.conf +++ b/conf/pika.conf @@ -240,7 +240,8 @@ slave-priority : 100 # The disable_auto_compactions option is [true | false] disable_auto_compactions : false -# Rocksdb max_subcompactions +# Rocksdb max_subcompactions, increasing this value can accelerate the exec speed of a single compaction task +# it's recommended to increase it's value if large compaction is found in you instance max-subcompactions : 1 # The minimum disk usage ratio for checking resume. # If the disk usage ratio is lower than min-check-resume-ratio, it will not check resume, only higher will check resume. 
@@ -352,17 +353,42 @@ compression : snappy # https://github.com/facebook/rocksdb/wiki/Compression #compression_per_level : [none:none:snappy:lz4:lz4] +# The number of rocksdb background threads(sum of max-background-compactions and max-background-flushes) +# If max-background-jobs has a valid value AND both 'max-background-flushs' and 'max-background-compactions' is set to -1, +# then max-background-flushs' and 'max-background-compactions will be auto config by rocksdb, specifically: +# 1/4 of max-background-jobs will be given to max-background-flushs' and the rest(3/4) will be given to 'max-background-compactions'. +# 'max-background-jobs' default value is 3 and the value range is [2, 12]. +max-background-jobs : 3 + # The number of background flushing threads. -# max-background-flushes default value is 1 and the value range is [1, 4]. -max-background-flushes : 1 +# max-background-flushes default value is -1 and the value range is [1, 4] or -1. +# if 'max-background-flushes' is set to -1, the 'max-background-compactions' should also be set to -1, +# which means let rocksdb to auto config them based on the value of 'max-background-jobs' +max-background-flushes : -1 + +# [NOTICE] you MUST NOT set one of the max-background-flushes or max-background-compactions to -1 while setting another one to other values(not -1). +# They SHOULD both be -1 or both not(if you want to config them manually). # The number of background compacting threads. -# max-background-compactions default value is 2 and the value range is [1, 8]. -max-background-compactions : 2 +# max-background-compactions default value is -1 and the value range is [1, 8] or -1. 
+# if 'max-background-compactions' is set to -1, the 'max-background-flushes' should also be set to -1, +# which means let rocksdb to auto config them based on the value of 'max-background-jobs' +max-background-compactions : -1 + +# RocksDB delayed-write-rate, default is 0(infer from rate-limiter by RocksDB) +# Ref from rocksdb: Whenever stall conditions are triggered, RocksDB will reduce write rate to delayed_write_rate, +# and could possibly reduce write rate to even lower than delayed_write_rate if estimated pending compaction bytes accumulates. +# If the value is 0, RcoksDB will infer a value from `rater_limiter` value if it is not empty, or 16MB if `rater_limiter` is empty. +# Note that if users change the rate in `rate_limiter` after DB is opened, delayed_write_rate won't be adjusted. +# [Support Dynamically changeable] send 'config set delayed-write-rate' to a running pika can change it's value dynamically +delayed-write-rate : 0 + + +# RocksDB will try to limit number of bytes in one compaction to be lower than this max-compaction-bytes. +# But it's NOT guaranteed. +# default value is -1, means let it be 25 * target-file-size-base (Which is RocksDB's default value) +max-compaction-bytes : -1 -# The number of background threads. -# max-background-jobs default value is 3 and the value range is [2, 12]. 
-max-background-jobs : 3 # maximum value of RocksDB cached open file descriptors max-cache-files : 5000 @@ -428,14 +454,17 @@ default-slot-num : 1024 # 0: Read 1: Write 2: ReadAndWrite # rate-limiter-mode : default 1 -# rate limiter bandwidth, default 2000MB/s -#rate-limiter-bandwidth : 2097152000 +# rate limiter bandwidth, units in bytes, default 1024GB/s (No limit) +# [Support Dynamically changeable] send 'rate-limiter-bandwidth' to a running pika can change it's value dynamically +#rate-limiter-bandwidth : 1099511627776 #rate-limiter-refill-period-us : 100000 # #rate-limiter-fairness: 10 -# rate limiter auto tune https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html. the default value is false. +# if auto_tuned is true: Enables dynamic adjustment of rate limit within the range +#`[rate-limiter-bandwidth / 20, rate-limiter-bandwidth]`, according to the recent demand for background I/O. +# rate limiter auto tune https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html. the default value is true. 
#rate-limiter-auto-tuned : true ################################## RocksDB Blob Configure ##################### diff --git a/include/pika_conf.h b/include/pika_conf.h index e0cb81062d..d55b45e027 100644 --- a/include/pika_conf.h +++ b/include/pika_conf.h @@ -255,6 +255,12 @@ class PikaConf : public pstd::BaseConf { std::shared_lock l(rwlock_); return target_file_size_base_; } + + uint64_t max_compaction_bytes() { + std::shared_lock l(rwlock_); + return static_cast(max_compaction_bytes_); + } + int max_cache_statistic_keys() { std::shared_lock l(rwlock_); return max_cache_statistic_keys_; @@ -279,6 +285,10 @@ class PikaConf : public pstd::BaseConf { std::shared_lock l(rwlock_); return max_background_jobs_; } + uint64_t delayed_write_rate(){ + std::shared_lock l(rwlock_); + return static_cast(delayed_write_rate_); + } int max_cache_files() { std::shared_lock l(rwlock_); return max_cache_files_; @@ -723,6 +733,24 @@ class PikaConf : public pstd::BaseConf { arena_block_size_ = value; } + void SetRateLmiterBandwidth(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("rate-limiter-bandwidth", std::to_string(value)); + rate_limiter_bandwidth_ = value; + } + + void SetDelayedWriteRate(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("delayed-write-rate", std::to_string(value)); + delayed_write_rate_ = value; + } + + void SetMaxCompactionBytes(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-compaction-bytes", std::to_string(value)); + max_compaction_bytes_ = value; + } + void SetLogLevel(const std::string& value) { std::lock_guard l(rwlock_); TryPushDiffCommands("loglevel", value); @@ -862,9 +890,10 @@ class PikaConf : public pstd::BaseConf { int max_cache_statistic_keys_ = 0; int small_compaction_threshold_ = 0; int small_compaction_duration_threshold_ = 0; - int max_background_flushes_ = 1; - int max_background_compactions_ = 2; + int max_background_flushes_ = -1; + int max_background_compactions_ = -1; int 
max_background_jobs_ = 0; + int64_t delayed_write_rate_ = 0; int max_cache_files_ = 0; std::atomic rocksdb_ttl_second_ = 0; std::atomic rocksdb_periodic_second_ = 0; @@ -908,6 +937,7 @@ class PikaConf : public pstd::BaseConf { // bool write_binlog_ = false; int target_file_size_base_ = 0; + int64_t max_compaction_bytes_ = 0; int binlog_file_size_ = 0; // cache @@ -942,7 +972,7 @@ class PikaConf : public pstd::BaseConf { std::shared_mutex rwlock_; // Rsync Rate limiting configuration - int throttle_bytes_per_second_ = 207200000; + int throttle_bytes_per_second_ = 200 << 20; // 200MB/s int max_rsync_parallel_num_ = kMaxRsyncParallelNum; std::atomic_int64_t rsync_timeout_ms_ = 1000; }; diff --git a/src/pika_admin.cc b/src/pika_admin.cc index 18b5e89873..bb52159dd6 100644 --- a/src/pika_admin.cc +++ b/src/pika_admin.cc @@ -1929,6 +1929,18 @@ void ConfigCmd::ConfigGet(std::string& ret) { EncodeNumber(&config_body, g_pika_conf->rate_limiter_bandwidth()); } + if (pstd::stringmatch(pattern.data(), "delayed-write-rate", 1) != 0) { + elements += 2; + EncodeString(&config_body, "delayed-write-rate"); + EncodeNumber(&config_body, g_pika_conf->delayed_write_rate()); + } + + if (pstd::stringmatch(pattern.data(), "max-compaction-bytes", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-compaction-bytes"); + EncodeNumber(&config_body, g_pika_conf->max_compaction_bytes()); + } + if (pstd::stringmatch(pattern.data(), "rate-limiter-refill-period-us", 1) != 0) { elements += 2; EncodeString(&config_body, "rate-limiter-refill-period-us"); @@ -2342,6 +2354,43 @@ void ConfigCmd::ConfigSet(std::shared_ptr db) { } g_pika_conf->SetDisableAutoCompaction(value); res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "rate-limiter-bandwidth") { + int64_t new_bandwidth = 0; + if (pstd::string2int(value.data(), value.size(), &new_bandwidth) == 0 || new_bandwidth <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rate-limiter-bandwidth'\r\n"); + 
return; + } + g_pika_server->storage_options().options.rate_limiter->SetBytesPerSecond(new_bandwidth); + g_pika_conf->SetRateLmiterBandwidth(new_bandwidth); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "delayed-write-rate") { + int64_t new_delayed_write_rate = 0; + if (pstd::string2int(value.data(), value.size(), &new_delayed_write_rate) == 0 || new_delayed_write_rate <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'delayed-write-rate'\r\n"); + return; + } + std::unordered_map options_map{{"delayed_write_rate", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set delayed-write-rate wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetDelayedWriteRate(new_delayed_write_rate); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-compaction-bytes") { + int64_t new_max_compaction_bytes = 0; + if (pstd::string2int(value.data(), value.size(), &new_max_compaction_bytes) == 0 || new_max_compaction_bytes <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-compaction-bytes'\r\n"); + return; + } + std::unordered_map options_map{{"max_compaction_bytes", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set max-compaction-bytes wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMaxCompactionBytes(new_max_compaction_bytes); + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "max-client-response-size") { if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-client-response-size'\r\n"); @@ -2461,7 +2510,7 @@ void ConfigCmd::ConfigSet(std::shared_ptr db) { g_pika_conf->SetMaxCacheFiles(static_cast(ival)); 
res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "max-background-compactions") { - if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival <= 0) { res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-background-compactions'\r\n"); return; } diff --git a/src/pika_conf.cc b/src/pika_conf.cc index c88e77478b..4ca6710b60 100644 --- a/src/pika_conf.cc +++ b/src/pika_conf.cc @@ -355,7 +355,7 @@ int PikaConf::Load() { // rate-limiter-bandwidth GetConfInt64("rate-limiter-bandwidth", &rate_limiter_bandwidth_); if (rate_limiter_bandwidth_ <= 0) { - rate_limiter_bandwidth_ = 2000 * 1024 * 1024; // 2000MB/s + rate_limiter_bandwidth_ = 1024LL << 30; // 1024GB/s } // rate-limiter-refill-period-us @@ -372,6 +372,7 @@ int PikaConf::Load() { std::string at; GetConfStr("rate-limiter-auto-tuned", &at); + // rate_limiter_auto_tuned_ will be true if user didn't config rate_limiter_auto_tuned_ = at == "yes" || at.empty(); // max_write_buffer_num @@ -393,6 +394,12 @@ int PikaConf::Load() { target_file_size_base_ = 1048576; // 10Mb } + GetConfInt64("max-compaction-bytes", &max_compaction_bytes_); + if (max_compaction_bytes_ <= 0) { + // RocksDB's default is 25 * target_file_size_base_ + max_compaction_bytes_ = target_file_size_base_ * 25; + } + max_cache_statistic_keys_ = 0; GetConfInt("max-cache-statistic-keys", &max_cache_statistic_keys_); if (max_cache_statistic_keys_ <= 0) { @@ -418,8 +425,9 @@ int PikaConf::Load() { small_compaction_duration_threshold_ = 1000000; } + // max-background-flushes and max-background-compactions should both be -1 or both not GetConfInt("max-background-flushes", &max_background_flushes_); - if (max_background_flushes_ <= 0) { + if (max_background_flushes_ <= 0 && max_background_flushes_ != -1) { max_background_flushes_ = 1; } if (max_background_flushes_ >= 6) { @@ -427,7 +435,7 @@ int PikaConf::Load() { } 
GetConfInt("max-background-compactions", &max_background_compactions_); - if (max_background_compactions_ <= 0) { + if (max_background_compactions_ <= 0 && max_background_compactions_ != -1) { max_background_compactions_ = 2; } if (max_background_compactions_ >= 8) { @@ -443,6 +451,13 @@ int PikaConf::Load() { max_background_jobs_ = (8 + 6); } + GetConfInt64("delayed-write-rate", &delayed_write_rate_); + if (delayed_write_rate_ <= 0) { + // set 0 means let rocksDB infer from rate-limiter(by default, rate-limiter is 1024GB, delayed_write_rate will be 512GB) + // if rate-limiter is nullptr, it would be set to 16MB by RocksDB + delayed_write_rate_ = 0; + } + max_cache_files_ = 5000; GetConfInt("max-cache-files", &max_cache_files_); if (max_cache_files_ < -1) { @@ -651,7 +666,7 @@ int PikaConf::Load() { // throttle-bytes-per-second GetConfInt("throttle-bytes-per-second", &throttle_bytes_per_second_); if (throttle_bytes_per_second_ <= 0) { - throttle_bytes_per_second_ = 207200000; + throttle_bytes_per_second_ = 200LL << 20; //200 MB } GetConfInt("max-rsync-parallel-num", &max_rsync_parallel_num_); @@ -749,6 +764,9 @@ int PikaConf::ConfigRewrite() { SetConfInt("max-cache-files", max_cache_files_); SetConfInt("max-background-compactions", max_background_compactions_); SetConfInt("max-background-jobs", max_background_jobs_); + SetConfInt64("rate-limiter-bandwidth", rate_limiter_bandwidth_); + SetConfInt64("delayed-write-rate", delayed_write_rate_); + SetConfInt64("max-compaction-bytes", max_compaction_bytes_); SetConfInt("max-write-buffer-num", max_write_buffer_num_); SetConfInt64("write-buffer-size", write_buffer_size_); SetConfInt("min-write-buffer-number-to-merge", min_write_buffer_number_to_merge_); diff --git a/src/pika_server.cc b/src/pika_server.cc index b5fa4f56d9..5c3aae16df 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -1299,10 +1299,12 @@ void PikaServer::InitStorageOptions() { storage_options_.options.max_bytes_for_level_base = 
g_pika_conf->level0_file_num_compaction_trigger() * g_pika_conf->write_buffer_size(); storage_options_.options.max_subcompactions = g_pika_conf->max_subcompactions(); storage_options_.options.target_file_size_base = g_pika_conf->target_file_size_base(); + storage_options_.options.max_compaction_bytes = g_pika_conf->max_compaction_bytes(); storage_options_.options.max_background_flushes = g_pika_conf->max_background_flushes(); storage_options_.options.max_background_compactions = g_pika_conf->max_background_compactions(); storage_options_.options.disable_auto_compactions = g_pika_conf->disable_auto_compactions(); storage_options_.options.max_background_jobs = g_pika_conf->max_background_jobs(); + storage_options_.options.delayed_write_rate = g_pika_conf->delayed_write_rate(); storage_options_.options.max_open_files = g_pika_conf->max_cache_files(); storage_options_.options.max_bytes_for_level_multiplier = g_pika_conf->max_bytes_for_level_multiplier(); storage_options_.options.optimize_filters_for_hits = g_pika_conf->optimize_filters_for_hits(); @@ -1337,7 +1339,6 @@ void PikaServer::InitStorageOptions() { storage_options_.table_options.block_cache = rocksdb::NewLRUCache(storage_options_.block_cache_size, static_cast(g_pika_conf->num_shard_bits())); } - storage_options_.options.rate_limiter = std::shared_ptr( rocksdb::NewGenericRateLimiter( @@ -1347,7 +1348,6 @@ void PikaServer::InitStorageOptions() { static_cast(g_pika_conf->rate_limiter_mode()), g_pika_conf->rate_limiter_auto_tuned() )); - // For Storage small compaction storage_options_.statistics_max_size = g_pika_conf->max_cache_statistic_keys(); storage_options_.small_compaction_threshold = g_pika_conf->small_compaction_threshold(); From e7e2f414035ad0f8bc2b60523dba0adcfa15afc2 Mon Sep 17 00:00:00 2001 From: chejinge <945997690@qq.com> Date: Tue, 18 Jun 2024 18:18:59 +0800 Subject: [PATCH 04/11] fix: slotmigrate return not correct (#2741) * fix: slotmigrate return not correct * fix: slotmigrate return not 
correct * fix codestyle --------- Co-authored-by: chejinge --- src/pika_migrate_thread.cc | 33 ++++++++++++++++++--------------- src/pika_slot_command.cc | 1 - 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/pika_migrate_thread.cc b/src/pika_migrate_thread.cc index a5786381b0..fd221f0b8e 100644 --- a/src/pika_migrate_thread.cc +++ b/src/pika_migrate_thread.cc @@ -635,7 +635,7 @@ bool PikaMigrateThread::ReqMigrateBatch(const std::string &ip, int64_t port, int return false; } -int PikaMigrateThread::ReqMigrateOne(const std::string& key, const std::shared_ptr& db) { +int PikaMigrateThread::ReqMigrateOne(const std::string &key, const std::shared_ptr &db) { std::unique_lock lm(migrator_mutex_); int slot_id = GetSlotID(g_pika_conf->default_slot_num(), key); @@ -653,12 +653,14 @@ int PikaMigrateThread::ReqMigrateOne(const std::string& key, const std::shared_p } key_type = storage::DataTypeToTag(type); if (type == storage::DataType::kNones) { - LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne key: " << key << " type: " << static_cast(type) << " is illegal"; - return -1; + LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne key: " << key << " type: " << static_cast(type) + << " is illegal"; + return 0; } + if (slot_id != slot_id_) { LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne Slot : " << slot_id << " is not the migrating slot:" << slot_id_; - return -2; + return -1; } // if the migrate thread exit, start it @@ -675,17 +677,16 @@ int PikaMigrateThread::ReqMigrateOne(const std::string& key, const std::shared_p is_migrating_ = true; usleep(100); } + } + // check the key is migrating + std::pair kpair = std::make_pair(key_type, key); + if (IsMigrating(kpair)) { + LOG(INFO) << "PikaMigrateThread::ReqMigrateOne key: " << key << " is migrating ! 
"; + return 1; } else { - // check the key is migrating - std::pair kpair = std::make_pair(key_type, key); - if (IsMigrating(kpair)) { - LOG(INFO) << "PikaMigrateThread::ReqMigrateOne key: " << key << " is migrating ! "; - return 1; - } else { - std::unique_lock lo(mgrtone_queue_mutex_); - mgrtone_queue_.emplace_back(kpair); - NotifyRequestMigrate(); - } + std::unique_lock lo(mgrtone_queue_mutex_); + mgrtone_queue_.emplace_back(kpair); + NotifyRequestMigrate(); } return 1; @@ -934,7 +935,9 @@ void *PikaMigrateThread::ThreadMain() { { std::unique_lock lw(workers_mutex_); while (!should_exit_ && is_task_success_ && send_num_ != response_num_) { - workers_cond_.wait(lw); + if (workers_cond_.wait_for(lw, std::chrono::seconds(60)) == std::cv_status::timeout) { + break; + } } } LOG(INFO) << "PikaMigrateThread::ThreadMain send_num:" << send_num_ << " response_num:" << response_num_; diff --git a/src/pika_slot_command.cc b/src/pika_slot_command.cc index 21e325100d..9340a6ebb2 100644 --- a/src/pika_slot_command.cc +++ b/src/pika_slot_command.cc @@ -1440,7 +1440,6 @@ void SlotsMgrtExecWrapperCmd::Do() { int ret = g_pika_server->SlotsMigrateOne(key_, db_); switch (ret) { case 0: - case -2: res_.AppendInteger(0); res_.AppendInteger(0); return; From e7edec67fe675cf0b22e76e9b0e15ec6012b408c Mon Sep 17 00:00:00 2001 From: JayLiu <38887641+luky116@users.noreply.github.com> Date: Wed, 19 Jun 2024 15:35:52 +0800 Subject: [PATCH 05/11] docs: modify run pika in docker readme (#2743) * modify readme * modify readme * modify readme * modify readme --------- Co-authored-by: liuyuecai --- README.md | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 37de45f596..a97f29e074 100644 --- a/README.md +++ b/README.md @@ -252,14 +252,21 @@ Users can directly download the latest binary version package from [releases](ht * #### 3.1 Running with Docker - ```bash + Modify the following configuration items of conf/pika.conf file: + ``` + 
log-path : /data/log/ + db-path : /data/db/ + db-sync-path : /data/dbsync/ + dump-path : /data/dump/ + ``` + + And then execute the following statement to start pika in docker: + ```bash docker run -d \ --restart=always \ -p 9221:9221 \ - -v :/pika/log \ - -v :/pika/db \ - -v :/pika/dump \ - -v :/pika/dbsync \ + -v "$(pwd)/conf":"/pika/conf" \ + -v "/tmp/pika-data":"/data" \ pikadb/pika:v3.3.6 redis-cli -p 9221 "info" From 09e9673b25dba0fbd72a5301dadc8cc6da96cabd Mon Sep 17 00:00:00 2001 From: chejinge <945997690@qq.com> Date: Wed, 19 Jun 2024 22:44:40 +0800 Subject: [PATCH 06/11] feat:thread purge (#2697) * feat:thread purge --------- Co-authored-by: chejinge --- conf/pika.conf | 5 ++++ include/pika_client_processor.h | 2 -- include/pika_conf.h | 10 ++++++++ include/pika_repl_bgworker.h | 3 +++ include/pika_server.h | 9 +++++-- src/net/include/backend_thread.h | 1 + src/net/include/client_thread.h | 1 + src/net/include/net_thread.h | 2 +- src/net/include/server_thread.h | 2 ++ src/net/src/backend_thread.cc | 2 ++ src/net/src/client_thread.cc | 2 ++ src/net/src/dispatch_thread.cc | 2 +- src/net/src/holy_thread.h | 2 ++ src/net/src/net_thread_name.h | 2 +- src/net/src/net_util.cc | 1 + src/net/src/net_util.h | 1 + src/net/src/thread_pool.cc | 4 ++- src/pika_admin.cc | 20 +++++++++++++++ src/pika_client_processor.cc | 18 ------------- src/pika_conf.cc | 13 +++++++--- src/pika_repl_client.cc | 10 ++++++-- src/pika_repl_server.cc | 3 ++- src/pika_server.cc | 44 ++++++++++++++++++++++---------- src/rsync_client.cc | 1 + src/rsync_server.cc | 3 ++- 25 files changed, 115 insertions(+), 48 deletions(-) diff --git a/conf/pika.conf b/conf/pika.conf index 1396caf5e5..3fcb5d5158 100644 --- a/conf/pika.conf +++ b/conf/pika.conf @@ -27,6 +27,11 @@ thread-num : 1 # are dedicated to handling user requests. thread-pool-size : 12 +# This parameter is used to control whether to separate fast and slow commands. +# When slow-cmd-pool is set to yes, fast and slow commands are separated. 
+# When set to no, they are not separated. +slow-cmd-pool : no + # Size of the low level thread pool, The threads within this pool # are dedicated to handling slow user requests. slow-cmd-thread-pool-size : 1 diff --git a/include/pika_client_processor.h b/include/pika_client_processor.h index a2c628394e..dccd4ef96c 100644 --- a/include/pika_client_processor.h +++ b/include/pika_client_processor.h @@ -19,12 +19,10 @@ class PikaClientProcessor { int Start(); void Stop(); void SchedulePool(net::TaskFunc func, void* arg); - void ScheduleBgThreads(net::TaskFunc func, void* arg, const std::string& hash_str); size_t ThreadPoolCurQueueSize(); size_t ThreadPoolMaxQueueSize(); private: std::unique_ptr pool_; - std::vector> bg_threads_; }; #endif // PIKA_CLIENT_PROCESSOR_H_ diff --git a/include/pika_conf.h b/include/pika_conf.h index d55b45e027..e93a5e7e5b 100644 --- a/include/pika_conf.h +++ b/include/pika_conf.h @@ -186,6 +186,10 @@ class PikaConf : public pstd::BaseConf { std::shared_lock l(rwlock_); return slotmigrate_; } + bool slow_cmd_pool() { + std::shared_lock l(rwlock_); + return slow_cmd_pool_; + } std::string server_id() { std::shared_lock l(rwlock_); return server_id_; @@ -584,6 +588,11 @@ class PikaConf : public pstd::BaseConf { TryPushDiffCommands("slotmigrate", value ? "yes" : "no"); slotmigrate_.store(value); } + void SetSlowCmdPool(const bool value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slow-cmd-pool", value ? 
"yes" : "no"); + slow_cmd_pool_.store(value); + } void SetSlotMigrateThreadNum(const int value) { std::lock_guard l(rwlock_); TryPushDiffCommands("slotmigrate-thread-num", std::to_string(value)); @@ -872,6 +881,7 @@ class PikaConf : public pstd::BaseConf { std::string bgsave_path_; std::string bgsave_prefix_; std::string pidfile_; + std::atomic slow_cmd_pool_; std::string compression_; std::string compression_per_level_; diff --git a/include/pika_repl_bgworker.h b/include/pika_repl_bgworker.h index 2401d72009..e9d6a1b034 100644 --- a/include/pika_repl_bgworker.h +++ b/include/pika_repl_bgworker.h @@ -28,6 +28,9 @@ class PikaReplBgWorker { void QueueClear(); static void HandleBGWorkerWriteBinlog(void* arg); static void HandleBGWorkerWriteDB(void* arg); + void SetThreadName(const std::string& thread_name) { + bg_thread_.set_thread_name(thread_name); + } BinlogItem binlog_item_; net::RedisParser redis_parser_; diff --git a/include/pika_server.h b/include/pika_server.h index 4811c54045..480ba5c17e 100644 --- a/include/pika_server.h +++ b/include/pika_server.h @@ -97,6 +97,7 @@ class PikaServer : public pstd::noncopyable { bool force_full_sync(); void SetForceFullSync(bool v); void SetDispatchQueueLimit(int queue_limit); + void SetSlowCmdThreadPoolFlag(bool flag); storage::StorageOptions storage_options(); std::unique_ptr& pika_dispatch_thread() { return pika_dispatch_thread_; @@ -170,7 +171,6 @@ class PikaServer : public pstd::noncopyable { void FinishMetaSync(); bool MetaSyncDone(); void ResetMetaSyncStatus(); - void SetLoopDBStateMachine(bool need_loop); int GetMetaSyncTimestamp(); void UpdateMetaSyncTimestamp(); void UpdateMetaSyncTimestampWithoutLock(); @@ -181,7 +181,7 @@ class PikaServer : public pstd::noncopyable { * PikaClientProcessor Process Task */ void ScheduleClientPool(net::TaskFunc func, void* arg, bool is_slow_cmd); - void ScheduleClientBgThreads(net::TaskFunc func, void* arg, const std::string& hash_str); + // for info debug size_t 
ClientProcessorThreadPoolCurQueueSize(); size_t ClientProcessorThreadPoolMaxQueueSize(); @@ -644,6 +644,11 @@ class PikaServer : public pstd::noncopyable { * acl */ std::unique_ptr<::Acl> acl_ = nullptr; + + /* + * fast and slow thread pools + */ + bool slow_cmd_thread_pool_flag_; }; #endif diff --git a/src/net/include/backend_thread.h b/src/net/include/backend_thread.h index 6e39583014..b374ec86c6 100644 --- a/src/net/include/backend_thread.h +++ b/src/net/include/backend_thread.h @@ -110,6 +110,7 @@ class BackendThread : public Thread { */ int StartThread() override; int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } pstd::Status Write(int fd, const std::string& msg); pstd::Status Close(int fd); // Try to connect fd noblock, if return EINPROGRESS or EAGAIN or EWOULDBLOCK diff --git a/src/net/include/client_thread.h b/src/net/include/client_thread.h index 25846555c2..c57174724d 100644 --- a/src/net/include/client_thread.h +++ b/src/net/include/client_thread.h @@ -110,6 +110,7 @@ class ClientThread : public Thread { */ int StartThread() override; int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } pstd::Status Write(const std::string& ip, int port, const std::string& msg); pstd::Status Close(const std::string& ip, int port); diff --git a/src/net/include/net_thread.h b/src/net/include/net_thread.h index ac700819a5..ff96811e91 100644 --- a/src/net/include/net_thread.h +++ b/src/net/include/net_thread.h @@ -34,7 +34,7 @@ class Thread : public pstd::noncopyable { std::string thread_name() const { return thread_name_; } - void set_thread_name(const std::string& name) { thread_name_ = name; } + virtual void set_thread_name(const std::string& name) { thread_name_ = name; } protected: std::atomic_bool should_stop_; diff --git a/src/net/include/server_thread.h b/src/net/include/server_thread.h index d0d6d63612..b8defbf2a6 100644 --- 
a/src/net/include/server_thread.h +++ b/src/net/include/server_thread.h @@ -150,6 +150,8 @@ class ServerThread : public Thread { // Move into server thread virtual void MoveConnIn(std::shared_ptr conn, const NotifyType& type) = 0; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } + virtual void KillAllConns() = 0; virtual bool KillConn(const std::string& ip_port) = 0; diff --git a/src/net/src/backend_thread.cc b/src/net/src/backend_thread.cc index b0eaa53687..27389293d7 100644 --- a/src/net/src/backend_thread.cc +++ b/src/net/src/backend_thread.cc @@ -48,6 +48,8 @@ int BackendThread::StartThread() { if (res) { return res; } + set_thread_name("BackendThread"); + return Thread::StartThread(); } diff --git a/src/net/src/client_thread.cc b/src/net/src/client_thread.cc index 916fd8f6ee..5561d6d3c0 100644 --- a/src/net/src/client_thread.cc +++ b/src/net/src/client_thread.cc @@ -47,6 +47,8 @@ int ClientThread::StartThread() { if (res) { return res; } + set_thread_name("ClientThread"); + return Thread::StartThread(); } diff --git a/src/net/src/dispatch_thread.cc b/src/net/src/dispatch_thread.cc index d98c44b68b..922688c178 100644 --- a/src/net/src/dispatch_thread.cc +++ b/src/net/src/dispatch_thread.cc @@ -66,7 +66,7 @@ int DispatchThread::StartThread() { // Adding timer tasks and run timertaskThread timerTaskThread_.AddTimerTask("blrpop_blocking_info_scan", 250, true, [this] { this->ScanExpiredBlockedConnsOfBlrpop(); }); - + timerTaskThread_.set_thread_name("TimerTaskThread"); timerTaskThread_.StartThread(); return ServerThread::StartThread(); } diff --git a/src/net/src/holy_thread.h b/src/net/src/holy_thread.h index 0b4f0d700b..312de4c84f 100644 --- a/src/net/src/holy_thread.h +++ b/src/net/src/holy_thread.h @@ -35,6 +35,8 @@ class HolyThread : public ServerThread { int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } + void set_keepalive_timeout(int timeout) 
override { keepalive_timeout_ = timeout; } int conn_num() const override; diff --git a/src/net/src/net_thread_name.h b/src/net/src/net_thread_name.h index e85cd1a6df..5d8dc78db8 100644 --- a/src/net/src/net_thread_name.h +++ b/src/net/src/net_thread_name.h @@ -26,7 +26,7 @@ inline bool SetThreadName(pthread_t id, const std::string& name) { #else inline bool SetThreadName(pthread_t id, const std::string& name) { // printf ("no pthread_setname\n"); - return false; + return pthread_setname_np(name.c_str()) == 0; } #endif } // namespace net diff --git a/src/net/src/net_util.cc b/src/net/src/net_util.cc index 6f1f4692d0..7efbb0f6cd 100644 --- a/src/net/src/net_util.cc +++ b/src/net/src/net_util.cc @@ -126,6 +126,7 @@ int TimerTaskThread::StartThread() { // if there is no timer task registered, no need of start the thread return -1; } + set_thread_name("TimerTask"); LOG(INFO) << "TimerTaskThread Starting..."; return Thread::StartThread(); } diff --git a/src/net/src/net_util.h b/src/net/src/net_util.h index a6fcbdc932..fe96e0a950 100644 --- a/src/net/src/net_util.h +++ b/src/net/src/net_util.h @@ -80,6 +80,7 @@ class TimerTaskThread : public Thread { ~TimerTaskThread() override; int StartThread() override; int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } uint32_t AddTimerTask(const std::string& task_name, int interval_ms, bool repeat_exec, const std::function &task){ return timer_task_manager_.AddTimerTask(task_name, interval_ms, repeat_exec, task); diff --git a/src/net/src/thread_pool.cc b/src/net/src/thread_pool.cc index 4ea4b82125..8e20694244 100644 --- a/src/net/src/thread_pool.cc +++ b/src/net/src/thread_pool.cc @@ -8,6 +8,7 @@ #include +#include #include namespace net { @@ -24,7 +25,8 @@ int ThreadPool::Worker::start() { return -1; } else { start_.store(true); - SetThreadName(thread_id_, thread_pool_->thread_pool_name() + "Worker"); + std::string thread_id_str = 
std::to_string(reinterpret_cast(thread_id_)); + SetThreadName(thread_id_, thread_pool_->thread_pool_name() + "_Worker_" + thread_id_str); } } return 0; diff --git a/src/pika_admin.cc b/src/pika_admin.cc index bb52159dd6..c47a90649b 100644 --- a/src/pika_admin.cc +++ b/src/pika_admin.cc @@ -1602,6 +1602,12 @@ void ConfigCmd::ConfigGet(std::string& ret) { EncodeString(&config_body, g_pika_conf->slotmigrate() ? "yes" : "no"); } + if (pstd::stringmatch(pattern.data(), "slow-cmd-pool", 1)) { + elements += 2; + EncodeString(&config_body, "slow-cmd-pool"); + EncodeString(&config_body, g_pika_conf->slow_cmd_pool() ? "yes" : "no"); + } + if (pstd::stringmatch(pattern.data(), "slotmigrate-thread-num", 1)!= 0) { elements += 2; EncodeString(&config_body, "slotmigrate-thread-num"); @@ -2143,6 +2149,7 @@ void ConfigCmd::ConfigSet(std::shared_ptr db) { "requirepass", "masterauth", "slotmigrate", + "slow-cmd-pool", "slotmigrate-thread-num", "thread-migrate-keys-num", "userpass", @@ -2302,6 +2309,19 @@ void ConfigCmd::ConfigSet(std::shared_ptr db) { } g_pika_conf->SetSlotMigrate(slotmigrate); res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slow_cmd_pool") { + bool SlowCmdPool; + if (value == "yes") { + SlowCmdPool = true; + } else if (value == "no") { + SlowCmdPool = false; + } else { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slow-cmd-pool'\r\n"); + return; + } + g_pika_conf->SetSlowCmdPool(SlowCmdPool); + g_pika_server->SetSlowCmdThreadPoolFlag(SlowCmdPool); + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "slowlog-log-slower-than") { if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-log-slower-than'\r\n"); diff --git a/src/pika_client_processor.cc b/src/pika_client_processor.cc index 8a26ccd4a4..5a1c60cee0 100644 --- a/src/pika_client_processor.cc +++ b/src/pika_client_processor.cc @@ -9,10 +9,6 @@ 
PikaClientProcessor::PikaClientProcessor(size_t worker_num, size_t max_queue_size, const std::string& name_prefix) { pool_ = std::make_unique(worker_num, max_queue_size, name_prefix + "Pool"); - for (size_t i = 0; i < worker_num; ++i) { - bg_threads_.push_back(std::make_unique(max_queue_size)); - bg_threads_.back()->set_thread_name(name_prefix + "BgThread"); - } } PikaClientProcessor::~PikaClientProcessor() { @@ -24,29 +20,15 @@ int PikaClientProcessor::Start() { if (res != net::kSuccess) { return res; } - for (auto& bg_thread : bg_threads_) { - res = bg_thread->StartThread(); - if (res != net::kSuccess) { - return res; - } - } return res; } void PikaClientProcessor::Stop() { pool_->stop_thread_pool(); - for (auto & bg_thread : bg_threads_) { - bg_thread->StopThread(); - } } void PikaClientProcessor::SchedulePool(net::TaskFunc func, void* arg) { pool_->Schedule(func, arg); } -void PikaClientProcessor::ScheduleBgThreads(net::TaskFunc func, void* arg, const std::string& hash_str) { - std::size_t index = std::hash{}(hash_str) % bg_threads_.size(); - bg_threads_[index]->Schedule(func, arg); -} - size_t PikaClientProcessor::ThreadPoolCurQueueSize() { size_t cur_size = 0; if (pool_) { diff --git a/src/pika_conf.cc b/src/pika_conf.cc index 4ca6710b60..3d54e3e895 100644 --- a/src/pika_conf.cc +++ b/src/pika_conf.cc @@ -66,6 +66,11 @@ int PikaConf::Load() { GetConfStr("slotmigrate", &smgrt); slotmigrate_.store(smgrt == "yes" ? true : false); + // slow cmd thread pool + std::string slowcmdpool; + GetConfStr("slow-cmd-pool", &slowcmdpool); + slow_cmd_pool_.store(slowcmdpool == "yes" ? 
true : false); + int binlog_writer_num = 1; GetConfInt("binlog-writer-num", &binlog_writer_num); if (binlog_writer_num <= 0 || binlog_writer_num > 24) { @@ -154,11 +159,11 @@ int PikaConf::Load() { } GetConfInt("slow-cmd-thread-pool-size", &slow_cmd_thread_pool_size_); - if (slow_cmd_thread_pool_size_ <= 0) { - slow_cmd_thread_pool_size_ = 12; + if (slow_cmd_thread_pool_size_ < 0) { + slow_cmd_thread_pool_size_ = 8; } - if (slow_cmd_thread_pool_size_ > 100) { - slow_cmd_thread_pool_size_ = 100; + if (slow_cmd_thread_pool_size_ > 50) { + slow_cmd_thread_pool_size_ = 50; } std::string slow_cmd_list; diff --git a/src/pika_repl_client.cc b/src/pika_repl_client.cc index 352fbdf7e5..2d53be265c 100644 --- a/src/pika_repl_client.cc +++ b/src/pika_repl_client.cc @@ -28,10 +28,16 @@ PikaReplClient::PikaReplClient(int cron_interval, int keepalive_timeout) { client_thread_ = std::make_unique(cron_interval, keepalive_timeout); client_thread_->set_thread_name("PikaReplClient"); for (int i = 0; i < g_pika_conf->sync_binlog_thread_num(); i++) { - write_binlog_workers_.emplace_back(std::make_unique(PIKA_SYNC_BUFFER_SIZE)); + auto new_binlog_worker = std::make_unique(PIKA_SYNC_BUFFER_SIZE); + std::string binlog_worker_name = "ReplBinlogWorker" + std::to_string(i); + new_binlog_worker->SetThreadName(binlog_worker_name); + write_binlog_workers_.emplace_back(std::move(new_binlog_worker)); } for (int i = 0; i < g_pika_conf->sync_thread_num(); ++i) { - write_db_workers_.emplace_back(std::make_unique(PIKA_SYNC_BUFFER_SIZE)); + auto new_db_worker = std::make_unique(PIKA_SYNC_BUFFER_SIZE); + std::string db_worker_name = "ReplWriteDBWorker" + std::to_string(i); + new_db_worker->SetThreadName(db_worker_name); + write_db_workers_.emplace_back(std::move(new_db_worker)); } } diff --git a/src/pika_repl_server.cc b/src/pika_repl_server.cc index a99fc18047..b92d239b18 100644 --- a/src/pika_repl_server.cc +++ b/src/pika_repl_server.cc @@ -17,7 +17,7 @@ extern PikaServer* g_pika_server; extern 
std::unique_ptr g_pika_rm; PikaReplServer::PikaReplServer(const std::set& ips, int port, int cron_interval) { - server_tp_ = std::make_unique(PIKA_REPL_SERVER_TP_SIZE, 100000); + server_tp_ = std::make_unique(PIKA_REPL_SERVER_TP_SIZE, 100000, "PikaReplServer"); pika_repl_server_thread_ = std::make_unique(ips, port, cron_interval); pika_repl_server_thread_->set_thread_name("PikaReplServer"); } @@ -27,6 +27,7 @@ PikaReplServer::~PikaReplServer() { } int PikaReplServer::Start() { + pika_repl_server_thread_->set_thread_name("PikaReplServer"); int res = pika_repl_server_thread_->StartThread(); if (res != net::kSuccess) { LOG(FATAL) << "Start Pika Repl Server Thread Error: " << res diff --git a/src/pika_server.cc b/src/pika_server.cc index 5c3aae16df..eaa73e5749 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -43,6 +43,7 @@ void DoPurgeDir(void* arg) { PikaServer::PikaServer() : exit_(false), + slow_cmd_thread_pool_flag_(g_pika_conf->slow_cmd_pool()), last_check_compact_time_({0, 0}), last_check_resume_time_({0, 0}), repl_state_(PIKA_REPL_NO_CONNECT), @@ -100,6 +101,7 @@ PikaServer::PikaServer() } acl_ = std::make_unique<::Acl>(); + SetSlowCmdThreadPoolFlag(g_pika_conf->slow_cmd_pool()); } PikaServer::~PikaServer() { @@ -166,12 +168,6 @@ void PikaServer::Start() { LOG(FATAL) << "Start PikaClientProcessor Error: " << ret << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); } - ret = pika_slow_cmd_thread_pool_->start_thread_pool(); - if (ret != net::kSuccess) { - dbs_.clear(); - LOG(FATAL) << "Start PikaLowLevelThreadPool Error: " << ret - << (ret == net::kCreateThreadError ? 
": create thread error " : ": other error"); - } ret = pika_dispatch_thread_->StartThread(); if (ret != net::kSuccess) { dbs_.clear(); @@ -205,6 +201,24 @@ void PikaServer::Start() { LOG(INFO) << "Goodbye..."; } +void PikaServer::SetSlowCmdThreadPoolFlag(bool flag) { + slow_cmd_thread_pool_flag_ = flag; + int ret = 0; + if (flag) { + ret = pika_slow_cmd_thread_pool_->start_thread_pool(); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(ERROR) << "Start PikaLowLevelThreadPool Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); + } + } else { + while (SlowCmdThreadPoolCurQueueSize() != 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + pika_slow_cmd_thread_pool_->stop_thread_pool(); + } +} + void PikaServer::Exit() { g_pika_server->DisableCompact(); exit_mutex_.unlock(); @@ -707,17 +721,13 @@ void PikaServer::SetFirstMetaSync(bool v) { } void PikaServer::ScheduleClientPool(net::TaskFunc func, void* arg, bool is_slow_cmd) { - if (is_slow_cmd) { + if (is_slow_cmd && g_pika_conf->slow_cmd_pool()) { pika_slow_cmd_thread_pool_->Schedule(func, arg); return; } pika_client_processor_->SchedulePool(func, arg); } -void PikaServer::ScheduleClientBgThreads(net::TaskFunc func, void* arg, const std::string& hash_str) { - pika_client_processor_->ScheduleBgThreads(func, arg, hash_str); -} - size_t PikaServer::ClientProcessorThreadPoolCurQueueSize() { if (!pika_client_processor_) { return 0; @@ -749,11 +759,13 @@ size_t PikaServer::SlowCmdThreadPoolMaxQueueSize() { } void PikaServer::BGSaveTaskSchedule(net::TaskFunc func, void* arg) { + bgsave_thread_.set_thread_name("BGSaveTask"); bgsave_thread_.StartThread(); bgsave_thread_.Schedule(func, arg); } void PikaServer::PurgelogsTaskSchedule(net::TaskFunc func, void* arg) { + purge_thread_.set_thread_name("PurgelogsTask"); purge_thread_.StartThread(); purge_thread_.Schedule(func, arg); } @@ -764,6 +776,7 @@ void PikaServer::PurgeDir(const std::string& path) { } void 
PikaServer::PurgeDirTaskSchedule(void (*function)(void*), void* arg) { + purge_thread_.set_thread_name("PurgeDirTask"); purge_thread_.StartThread(); purge_thread_.Schedule(function, arg); } @@ -814,6 +827,7 @@ void PikaServer::TryDBSync(const std::string& ip, int port, const std::string& d } void PikaServer::KeyScanTaskSchedule(net::TaskFunc func, void* arg) { + key_scan_thread_.set_thread_name("KeyScanTask"); key_scan_thread_.StartThread(); key_scan_thread_.Schedule(func, arg); } @@ -1453,6 +1467,7 @@ void PikaServer::Bgslotsreload(const std::shared_ptr& db) { LOG(INFO) << "Start slot reloading"; // Start new thread if needed + bgsave_thread_.set_thread_name("SlotsReload"); bgsave_thread_.StartThread(); bgsave_thread_.Schedule(&DoBgslotsreload, static_cast(this)); } @@ -1520,6 +1535,7 @@ void PikaServer::Bgslotscleanup(std::vector cleanupSlots, const std::shared LOG(INFO) << "Start slot cleanup, slots: " << slotsStr << std::endl; // Start new thread if needed + bgslots_cleanup_thread_.set_thread_name("SlotsCleanup"); bgslots_cleanup_thread_.StartThread(); bgslots_cleanup_thread_.Schedule(&DoBgslotscleanup, static_cast(this)); } @@ -1624,7 +1640,7 @@ void DoBgslotscleanup(void* arg) { void PikaServer::ResetCacheAsync(uint32_t cache_num, std::shared_ptr db, cache::CacheConfig *cache_cfg) { if (PIKA_CACHE_STATUS_OK == db->cache()->CacheStatus() || PIKA_CACHE_STATUS_NONE == db->cache()->CacheStatus()) { - + common_bg_thread_.set_thread_name("ResetCacheTask"); common_bg_thread_.StartThread(); BGCacheTaskArg *arg = new BGCacheTaskArg(); arg->db = db; @@ -1648,7 +1664,7 @@ void PikaServer::ClearCacheDbAsync(std::shared_ptr db) { LOG(WARNING) << "can not clear cache in status: " << db->cache()->CacheStatus(); return; } - + common_bg_thread_.set_thread_name("CacheClearThread"); common_bg_thread_.StartThread(); BGCacheTaskArg *arg = new BGCacheTaskArg(); arg->db = db; @@ -1716,7 +1732,7 @@ void PikaServer::ClearCacheDbAsyncV2(std::shared_ptr db) { LOG(WARNING) << "can not 
clear cache in status: " << db->cache()->CacheStatus(); return; } - + common_bg_thread_.set_thread_name("V2CacheClearThread"); common_bg_thread_.StartThread(); BGCacheTaskArg *arg = new BGCacheTaskArg(); arg->db = db; diff --git a/src/rsync_client.cc b/src/rsync_client.cc index 0cf683ba75..7def7cbadc 100644 --- a/src/rsync_client.cc +++ b/src/rsync_client.cc @@ -28,6 +28,7 @@ RsyncClient::RsyncClient(const std::string& dir, const std::string& db_name) parallel_num_(g_pika_conf->max_rsync_parallel_num()) { wo_mgr_.reset(new WaitObjectManager()); client_thread_ = std::make_unique(3000, 60, wo_mgr_.get()); + client_thread_->set_thread_name("RsyncClientThread"); work_threads_.resize(GetParallelNum()); finished_work_cnt_.store(0); } diff --git a/src/rsync_server.cc b/src/rsync_server.cc index ea339af59c..5696719980 100644 --- a/src/rsync_server.cc +++ b/src/rsync_server.cc @@ -31,7 +31,7 @@ void RsyncWriteResp(RsyncService::RsyncResponse& response, std::shared_ptr& ips, const int port) { - work_thread_ = std::make_unique(2, 100000); + work_thread_ = std::make_unique(2, 100000, "RsyncServerWork"); rsync_server_thread_ = std::make_unique(ips, port, 1 * 1000, this); } @@ -46,6 +46,7 @@ void RsyncServer::Schedule(net::TaskFunc func, void* arg) { int RsyncServer::Start() { LOG(INFO) << "start RsyncServer ..."; + rsync_server_thread_->set_thread_name("RsyncServerThread"); int res = rsync_server_thread_->StartThread(); if (res != net::kSuccess) { LOG(FATAL) << "Start rsync Server Thread Error. 
ret_code: " << res << " message: " From 55de8b392b99bb86b9bd1a699e7e922e794c6600 Mon Sep 17 00:00:00 2001 From: cheniujh <41671101+cheniujh@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:51:24 +0800 Subject: [PATCH 07/11] fix: Pika can not exec full-sync when multi slaves connect to the same master within a short time (#2746) * use int64_t instead of int32_t --------- Co-authored-by: cjh <1271435567@qq.com> --- src/pika_server.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pika_server.cc b/src/pika_server.cc index eaa73e5749..ccdd64499f 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -820,7 +820,8 @@ void PikaServer::TryDBSync(const std::string& ip, int port, const std::string& d std::string logger_filename = sync_db->Logger()->filename(); if (pstd::IsDir(bgsave_info.path) != 0 || !pstd::FileExists(NewFileName(logger_filename, bgsave_info.offset.b_offset.filenum)) || - top - bgsave_info.offset.b_offset.filenum > kDBSyncMaxGap) { + static_cast(top) - static_cast(bgsave_info.offset.b_offset.filenum) > + static_cast(kDBSyncMaxGap)) { // Need Bgsave first db->BgSaveDB(); } From 6f85ab51bc71dafe62e9ba35f3c521e76a881f7f Mon Sep 17 00:00:00 2001 From: wangshao1 <30471730+wangshao1@users.noreply.github.com> Date: Thu, 20 Jun 2024 21:22:20 +0800 Subject: [PATCH 08/11] fix: keyspace causes heap-buffer-overflow (#2749) * fix keyspace error about heap-buffer-overflow * fix by ai review comments --------- Co-authored-by: wangshaoyi --- src/pika_admin.cc | 13 +++++++------ src/storage/src/base_value_format.h | 1 + src/storage/src/redis.cc | 4 ++-- src/storage/src/storage.cc | 2 +- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/pika_admin.cc b/src/pika_admin.cc index c47a90649b..9e974bd7c1 100644 --- a/src/pika_admin.cc +++ b/src/pika_admin.cc @@ -1190,9 +1190,9 @@ void InfoCmd::InfoKeyspace(std::string& info) { if (argv_.size() > 1 && strcasecmp(argv_[1].data(), kAllSection.data()) == 0) { tmp_stream << "# Start 
async statistics\r\n"; } else if (argv_.size() == 3 && strcasecmp(argv_[1].data(), kKeyspaceSection.data()) == 0) { - tmp_stream << "# Start async statistics\r\n"; + tmp_stream << "# Start async statistics\r\n"; } else { - tmp_stream << "# Use \"info keyspace 1\" to do async statistics\r\n"; + tmp_stream << "# Use \"info keyspace 1\" to do async statistics\r\n"; } std::shared_lock rwl(g_pika_server->dbs_rw_); for (const auto& db_item : g_pika_server->dbs_) { @@ -1201,7 +1201,8 @@ void InfoCmd::InfoKeyspace(std::string& info) { key_scan_info = db_item.second->GetKeyScanInfo(); key_infos = key_scan_info.key_infos; duration = key_scan_info.duration; - if (key_infos.size() != (size_t)(storage::DataType::kNones)) { + if (key_infos.size() != (size_t)(storage::DataTypeNum)) { + LOG(ERROR) << "key_infos size is not equal with expected, potential data inconsistency"; info.append("info keyspace error\r\n"); return; } @@ -1216,7 +1217,7 @@ void InfoCmd::InfoKeyspace(std::string& info) { tmp_stream << "# Duration: " << std::to_string(duration) + "s" << "\r\n"; } - + tmp_stream << db_name << " Strings_keys=" << key_infos[0].keys << ", expires=" << key_infos[0].expires << ", invalid_keys=" << key_infos[0].invaild_keys << "\r\n"; tmp_stream << db_name << " Hashes_keys=" << key_infos[1].keys << ", expires=" << key_infos[1].expires @@ -2911,8 +2912,8 @@ void DbsizeCmd::Do() { } KeyScanInfo key_scan_info = dbs->GetKeyScanInfo(); std::vector key_infos = key_scan_info.key_infos; - if (key_infos.size() != (size_t)(storage::DataType::kNones)) { - res_.SetRes(CmdRes::kErrOther, "keyspace error"); + if (key_infos.size() != (size_t)(storage::DataTypeNum)) { + res_.SetRes(CmdRes::kErrOther, "Mismatch in expected data types and actual key info count"); return; } uint64_t dbsize = 0; diff --git a/src/storage/src/base_value_format.h b/src/storage/src/base_value_format.h index 4663d3df12..3f0f181f97 100644 --- a/src/storage/src/base_value_format.h +++ b/src/storage/src/base_value_format.h @@ 
-19,6 +19,7 @@ namespace storage { enum class DataType : uint8_t { kStrings = 0, kHashes = 1, kSets = 2, kLists = 3, kZSets = 4, kStreams = 5, kNones = 6, kAll = 7 }; +constexpr int DataTypeNum = int(DataType::kNones); constexpr char DataTypeTag[] = { 'k', 'h', 's', 'l', 'z', 'x', 'n', 'a'}; constexpr char* DataTypeStrings[] = { "string", "hash", "set", "list", "zset", "streams", "none", "all"}; diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index b5bfb66bd4..8b796c111d 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -352,7 +352,7 @@ void Redis::SetCompactRangeOptions(const bool is_canceled) { default_compact_range_options_.canceled = new std::atomic(is_canceled); } else { default_compact_range_options_.canceled->store(is_canceled); - } + } } Status Redis::GetProperty(const std::string& property, uint64_t* out) { @@ -365,7 +365,7 @@ Status Redis::GetProperty(const std::string& property, uint64_t* out) { } Status Redis::ScanKeyNum(std::vector* key_infos) { - key_infos->resize(5); + key_infos->resize(DataTypeNum); rocksdb::Status s; s = ScanStringsKeyNum(&((*key_infos)[0])); if (!s.ok()) { diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index ff4378367d..53454cec53 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -1828,7 +1828,7 @@ uint64_t Storage::GetProperty(const std::string& property) { Status Storage::GetKeyNum(std::vector* key_infos) { KeyInfo key_info; - key_infos->resize(size_t(DataType::kNones)); + key_infos->resize(DataTypeNum); for (const auto& db : insts_) { std::vector db_key_infos; // check the scanner was stopped or not, before scanning the next db From 1c1c113da2e1ca312f2ca9403e44cb402c63fbad Mon Sep 17 00:00:00 2001 From: chejinge <945997690@qq.com> Date: Thu, 20 Jun 2024 21:26:21 +0800 Subject: [PATCH 09/11] fix:not correct used bgsave_info_ (#2745) * fix:not correct used bgsave_info_ * fix:not correct used bgsave_info_ --------- Co-authored-by: chejinge --- 
include/pika_server.h | 29 ++++++++++++++--------------- src/pika_server.cc | 8 ++++---- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/include/pika_server.h b/include/pika_server.h index 480ba5c17e..02aaad1bfa 100644 --- a/include/pika_server.h +++ b/include/pika_server.h @@ -310,8 +310,7 @@ class PikaServer : public pstd::noncopyable { bool SlotsMigrateBatch(const std::string &ip, int64_t port, int64_t time_out, int64_t slots, int64_t keys_num, const std::shared_ptr& db); void GetSlotsMgrtSenderStatus(std::string *ip, int64_t* port, int64_t *slot, bool *migrating, int64_t *moved, int64_t *remained); bool SlotsMigrateAsyncCancel(); - std::shared_mutex bgsave_protector_; - BgSaveInfo bgsave_info_; + std::shared_mutex bgslots_protector_; /* * BGSlotsReload used @@ -337,28 +336,28 @@ class PikaServer : public pstd::noncopyable { BGSlotsReload bgslots_reload_; BGSlotsReload bgslots_reload() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_reload_; } bool GetSlotsreloading() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_reload_.reloading; } void SetSlotsreloading(bool reloading) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_reload_.reloading = reloading; } void SetSlotsreloadingCursor(int64_t cursor) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_reload_.cursor = cursor; } int64_t GetSlotsreloadingCursor() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_reload_.cursor; } void SetSlotsreloadingEndTime() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_reload_.end_time = time(nullptr); } void Bgslotsreload(const std::shared_ptr& db); @@ -399,33 +398,33 @@ class PikaServer : public pstd::noncopyable { net::BGThread bgslots_cleanup_thread_; BGSlotsCleanup 
bgslots_cleanup() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_cleanup_; } bool GetSlotscleaningup() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_cleanup_.cleaningup; } void SetSlotscleaningup(bool cleaningup) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_cleanup_.cleaningup = cleaningup; } void SetSlotscleaningupCursor(int64_t cursor) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_cleanup_.cursor = cursor; } void SetCleanupSlots(std::vector cleanup_slots) { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_cleanup_.cleanup_slots.swap(cleanup_slots); } std::vector GetCleanupSlots() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); return bgslots_cleanup_.cleanup_slots; } void Bgslotscleanup(std::vector cleanup_slots, const std::shared_ptr& db); void StopBgslotscleanup() { - std::lock_guard ml(bgsave_protector_); + std::lock_guard ml(bgslots_protector_); bgslots_cleanup_.cleaningup = false; std::vector cleanup_slots; bgslots_cleanup_.cleanup_slots.swap(cleanup_slots); diff --git a/src/pika_server.cc b/src/pika_server.cc index ccdd64499f..450a180012 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -1449,8 +1449,8 @@ bool PikaServer::SlotsMigrateAsyncCancel() { void PikaServer::Bgslotsreload(const std::shared_ptr& db) { // Only one thread can go through { - std::lock_guard ml(bgsave_protector_); - if (bgslots_reload_.reloading || bgsave_info_.bgsaving) { + std::lock_guard ml(bgslots_protector_); + if (bgslots_reload_.reloading || db->IsBgSaving()) { return; } bgslots_reload_.reloading = true; @@ -1514,8 +1514,8 @@ void DoBgslotsreload(void* arg) { void PikaServer::Bgslotscleanup(std::vector cleanupSlots, const std::shared_ptr& db) { // Only one thread can go through { - 
std::lock_guard ml(bgsave_protector_); - if (bgslots_cleanup_.cleaningup || bgslots_reload_.reloading || bgsave_info_.bgsaving) { + std::lock_guard ml(bgslots_protector_); + if (bgslots_cleanup_.cleaningup || bgslots_reload_.reloading || db->IsBgSaving()) { return; } bgslots_cleanup_.cleaningup = true; From 8dea10f4262c0703c71635f3a171c03d12e19495 Mon Sep 17 00:00:00 2001 From: wangshao1 <30471730+wangshao1@users.noreply.github.com> Date: Fri, 21 Jun 2024 16:02:20 +0800 Subject: [PATCH 10/11] fix repleat get meta from rocksdb in ttl/persist/expire/expireat api (#2744) Co-authored-by: wangshaoyi --- src/storage/src/redis.h | 72 +++---- src/storage/src/redis_hashes.cc | 241 ++++++++++++++------- src/storage/src/redis_lists.cc | 240 +++++++++++++++------ src/storage/src/redis_sets.cc | 276 +++++++++++++++++------- src/storage/src/redis_streams.cc | 56 +++-- src/storage/src/redis_strings.cc | 356 +++++++++++++++++++------------ src/storage/src/redis_zsets.cc | 276 +++++++++++++++++------- src/storage/src/storage.cc | 5 +- 8 files changed, 1026 insertions(+), 496 deletions(-) diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index ad8906ba0c..d818fc3e71 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -117,36 +117,36 @@ class Redis { Status ScanStreamsKeyNum(KeyInfo* key_info); // Keys Commands - virtual Status StringsExpire(const Slice& key, int64_t ttl); - virtual Status HashesExpire(const Slice& key, int64_t ttl); - virtual Status ListsExpire(const Slice& key, int64_t ttl); - virtual Status ZsetsExpire(const Slice& key, int64_t ttl); - virtual Status SetsExpire(const Slice& key, int64_t ttl); - - virtual Status StringsDel(const Slice& key); - virtual Status HashesDel(const Slice& key); - virtual Status ListsDel(const Slice& key); - virtual Status ZsetsDel(const Slice& key); - virtual Status SetsDel(const Slice& key); - virtual Status StreamsDel(const Slice& key); - - virtual Status StringsExpireat(const Slice& key, int64_t 
timestamp); - virtual Status HashesExpireat(const Slice& key, int64_t timestamp); - virtual Status ListsExpireat(const Slice& key, int64_t timestamp); - virtual Status SetsExpireat(const Slice& key, int64_t timestamp); - virtual Status ZsetsExpireat(const Slice& key, int64_t timestamp); - - virtual Status StringsPersist(const Slice& key); - virtual Status HashesPersist(const Slice& key); - virtual Status ListsPersist(const Slice& key); - virtual Status ZsetsPersist(const Slice& key); - virtual Status SetsPersist(const Slice& key); - - virtual Status StringsTTL(const Slice& key, int64_t* timestamp); - virtual Status HashesTTL(const Slice& key, int64_t* timestamp); - virtual Status ListsTTL(const Slice& key, int64_t* timestamp); - virtual Status ZsetsTTL(const Slice& key, int64_t* timestamp); - virtual Status SetsTTL(const Slice& key, int64_t* timestamp); + virtual Status StringsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + virtual Status HashesExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + virtual Status ListsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + virtual Status ZsetsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + virtual Status SetsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta = {}); + + virtual Status StringsDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status HashesDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status ListsDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status ZsetsDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status SetsDel(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status StreamsDel(const Slice& key, std::string&& prefetch_meta = {}); + + virtual Status StringsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + virtual Status HashesExpireat(const Slice& key, int64_t timestamp, 
std::string&& prefetch_meta = {}); + virtual Status ListsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + virtual Status SetsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + virtual Status ZsetsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta = {}); + + virtual Status StringsPersist(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status HashesPersist(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status ListsPersist(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status ZsetsPersist(const Slice& key, std::string&& prefetch_meta = {}); + virtual Status SetsPersist(const Slice& key, std::string&& prefetch_meta = {}); + + virtual Status StringsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); + virtual Status HashesTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); + virtual Status ListsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); + virtual Status ZsetsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); + virtual Status SetsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta = {}); // Strings Commands Status Append(const Slice& key, const Slice& value, int32_t* ret); @@ -200,7 +200,7 @@ class Redis { Status HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret); Status HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value); Status HKeys(const Slice& key, std::vector* fields); - Status HLen(const Slice& key, int32_t* ret); + Status HLen(const Slice& key, int32_t* ret, std::string&& prefetch_meta = {}); Status HMGet(const Slice& key, const std::vector& fields, std::vector* vss); Status HMSet(const Slice& key, const std::vector& fvs); Status HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res); @@ -246,7 +246,7 @@ class Redis { 
// Sets Commands Status SAdd(const Slice& key, const std::vector& members, int32_t* ret); - Status SCard(const Slice& key, int32_t* ret); + Status SCard(const Slice& key, int32_t* ret, std::string&& prefetch_meta = {}); Status SDiff(const std::vector& keys, std::vector* members); Status SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); Status SInter(const std::vector& keys, std::vector* members); @@ -269,7 +269,7 @@ class Redis { Status LIndex(const Slice& key, int64_t index, std::string* element); Status LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot, const std::string& value, int64_t* ret); - Status LLen(const Slice& key, uint64_t* len); + Status LLen(const Slice& key, uint64_t* len, std::string&& prefetch_meta = {}); Status LPop(const Slice& key, int64_t count, std::vector* elements); Status LPush(const Slice& key, const std::vector& values, uint64_t* ret); Status LPushx(const Slice& key, const std::vector& values, uint64_t* len); @@ -285,7 +285,7 @@ class Redis { // Zsets Commands Status ZAdd(const Slice& key, const std::vector& score_members, int32_t* ret); - Status ZCard(const Slice& key, int32_t* card); + Status ZCard(const Slice& key, int32_t* card, std::string&& prefetch_meta = {}); Status ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret); Status ZIncrby(const Slice& key, const Slice& member, double increment, double* ret); Status ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members); @@ -323,7 +323,7 @@ class Redis { Status XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args); Status XDel(const Slice& key, const std::vector& ids, int32_t& count); Status XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count); - Status XRange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); + Status XRange(const Slice& key, const 
StreamScanArgs& args, std::vector& id_messages, std::string&& prefetch_meta = {}); Status XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); Status XLen(const Slice& key, int32_t& len); Status XRead(const StreamReadGroupReadArgs& args, std::vector>& results, @@ -333,7 +333,7 @@ class Redis { rocksdb::ReadOptions& read_options); // get and parse the stream meta if found // @return ok only when the stream meta exists - Status GetStreamMeta(StreamMetaValue& tream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options); + Status GetStreamMeta(StreamMetaValue& tream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options, std::string&& prefetch_meta = {}); // Before calling this function, the caller should ensure that the ids are valid Status DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, diff --git a/src/storage/src/redis_hashes.cc b/src/storage/src/redis_hashes.cc index e256757e43..03a3c1c9b8 100644 --- a/src/storage/src/redis_hashes.cc +++ b/src/storage/src/redis_hashes.cc @@ -88,7 +88,10 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -149,7 +152,10 @@ Status Redis::HGet(const Slice& key, const Slice& field, std::string* value) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] 
+ "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -186,7 +192,10 @@ Status Redis::HGetall(const Slice& key, std::vector* fvs) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -226,7 +235,10 @@ Status Redis::HGetallWithTTL(const Slice& key, std::vector* fvs, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -281,7 +293,10 @@ Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64 if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + 
key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -370,7 +385,10 @@ Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -445,14 +463,16 @@ Status Redis::HKeys(const Slice& key, std::vector* fields) { ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - BaseMetaKey base_meta_key(key); Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -477,17 +497,25 @@ Status Redis::HKeys(const Slice& key, std::vector* fields) { return s; } -Status Redis::HLen(const Slice& key, int32_t* ret) { +Status Redis::HLen(const Slice& key, int32_t* ret, std::string&& prefetch_meta) { *ret = 0; - std::string meta_value; - - BaseMetaKey base_meta_key(key); - Status 
s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + std::string meta_value(std::move(prefetch_meta)); + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -523,7 +551,10 @@ Status Redis::HMGet(const Slice& key, const std::vector& fields, st if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -584,7 +615,10 @@ Status Redis::HMSet(const Slice& key, const std::vector& fvs) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect 
type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -656,7 +690,10 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -728,7 +765,10 @@ Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& value, i if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -788,7 +828,10 @@ Status Redis::HVals(const Slice& key, std::vector* values) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -848,7 +891,10 @@ Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -922,7 +968,10 @@ Status Redis::HScanx(const Slice& key, const std::string& start_field, const std if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -983,14 +1032,16 @@ Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std return Status::InvalidArgument("error in given range"); } - BaseMetaKey base_meta_key(key); Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return 
Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1052,14 +1103,16 @@ Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const st return Status::InvalidArgument("error in given range"); } - BaseMetaKey base_meta_key(key); Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1103,17 +1156,25 @@ Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const st return Status::OK(); } -Status Redis::HashesExpire(const Slice& key, int64_t ttl) { - std::string meta_value; +Status Redis::HashesExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } 
else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1135,17 +1196,25 @@ Status Redis::HashesExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::HashesDel(const Slice& key) { - std::string meta_value; +Status Redis::HashesDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = 
Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1164,17 +1233,25 @@ Status Redis::HashesDel(const Slice& key) { return s; } -Status Redis::HashesExpireat(const Slice& key, int64_t timestamp) { - std::string meta_value; +Status Redis::HashesExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1195,17 +1272,25 @@ Status Redis::HashesExpireat(const Slice& key, int64_t timestamp) { return s; } -Status Redis::HashesPersist(const Slice& key) { - std::string meta_value; +Status Redis::HashesPersist(const Slice& key, std::string&& 
prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1227,16 +1312,24 @@ Status Redis::HashesPersist(const Slice& key) { return s; } -Status Redis::HashesTTL(const Slice& key, int64_t* timestamp) { - std::string meta_value; - +Status Redis::HashesTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + Status s; BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kHashes)] + "get type: " + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { diff --git a/src/storage/src/redis_lists.cc b/src/storage/src/redis_lists.cc index 1998a76d23..db007ee2cf 100644 --- a/src/storage/src/redis_lists.cc +++ b/src/storage/src/redis_lists.cc @@ -72,7 +72,10 @@ Status Redis::LIndex(const Slice& key, int64_t index, std::string* element) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -113,7 +116,10 @@ Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, co if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + 
", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -211,17 +217,25 @@ Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, co return s; } -Status Redis::LLen(const Slice& key, uint64_t* len) { +Status Redis::LLen(const Slice& key, uint64_t* len, std::string&& prefetch_meta) { *len = 0; - std::string meta_value; + Status s; - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + // meta_value is empty means no meta value get before, + // we should get meta first + std::string meta_value(std::move(prefetch_meta)); + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -253,7 +267,10 @@ Status Redis::LPop(const Slice& key, int64_t count, std::vector* el if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: 
" + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -308,7 +325,10 @@ Status Redis::LPush(const Slice& key, const std::vector& values, ui if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -361,7 +381,10 @@ Status Redis::LPushx(const Slice& key, const std::vector& values, u if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -402,7 +425,10 @@ Status Redis::LRange(const Slice& key, int64_t start, int64_t stop, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -459,7 +485,10 @@ Status Redis::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std:: if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -526,7 +555,10 @@ Status Redis::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -655,7 +687,10 @@ Status Redis::LSet(const Slice& key, int64_t index, const Slice& value) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -696,7 +731,10 @@ Status Redis::LTrim(const 
Slice& key, int64_t start, int64_t stop) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -766,7 +804,10 @@ Status Redis::RPop(const Slice& key, int64_t count, std::vector* el if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -820,7 +861,10 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + destination.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -872,7 +916,10 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri if (ExpectedStale(source_meta_value)) { s = Status::NotFound(); } else { 
- return Status::InvalidArgument("WRONGTYPE, key: " + source.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(source_meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + source.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(source_meta_value))]); } } if (s.ok()) { @@ -907,7 +954,10 @@ Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::stri if (ExpectedStale(destination_meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + destination.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(destination_meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(destination_meta_value))]); } } if (s.ok()) { @@ -961,7 +1011,10 @@ Status Redis::RPush(const Slice& key, const std::vector& values, ui if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1014,7 +1067,10 @@ Status Redis::RPushx(const Slice& key, const std::vector& values, u if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() 
+ ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1041,17 +1097,25 @@ Status Redis::RPushx(const Slice& key, const std::vector& values, u return s; } -Status Redis::ListsExpire(const Slice& key, int64_t ttl) { - std::string meta_value; +Status Redis::ListsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1073,17 +1137,25 @@ Status Redis::ListsExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::ListsDel(const Slice& 
key) { - std::string meta_value; +Status Redis::ListsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], BaseMetaKey(key).Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1102,17 +1174,25 @@ Status Redis::ListsDel(const Slice& key) { return s; } -Status Redis::ListsExpireat(const Slice& key, int64_t timestamp) { - std::string meta_value; +Status Redis::ListsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() +
", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1133,16 +1213,25 @@ Status Redis::ListsExpireat(const Slice& key, int64_t timestamp) { return s; } -Status Redis::ListsPersist(const Slice& key) { - std::string meta_value; +Status Redis::ListsPersist(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, 
key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1152,8 +1241,8 @@ Status Redis::ListsPersist(const Slice& key) { } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - uint64_t timestamp = parsed_lists_meta_value.Etime(); - if (timestamp == 0) { + // Check if the list has set expiration time before attempting to persist + if (parsed_lists_meta_value.Etime() == 0) { return Status::NotFound("Not have an associated timeout"); } else { parsed_lists_meta_value.SetEtime(0); @@ -1164,16 +1253,24 @@ Status Redis::ListsPersist(const Slice& key) { return s; } -Status Redis::ListsTTL(const Slice& key, int64_t* timestamp) { - std::string meta_value; - +Status Redis::ListsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kLists)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1185,6 +1282,7 @@ Status Redis::ListsTTL(const Slice& key, int64_t* timestamp) { *timestamp = -2; return Status::NotFound(); } else { + // Return -1 for lists with no set expiration, and calculate remaining time for others *timestamp = parsed_lists_meta_value.Etime(); if (*timestamp == 0) { *timestamp = -1; diff --git a/src/storage/src/redis_sets.cc b/src/storage/src/redis_sets.cc index 9fc400d039..db5044b440 100644 --- a/src/storage/src/redis_sets.cc +++ b/src/storage/src/redis_sets.cc @@ -83,7 +83,10 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& me if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -146,19 +149,25 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& me return db_->Write(default_write_options_, &batch); } -rocksdb::Status Redis::SCard(const Slice& key, int32_t* ret) { +rocksdb::Status Redis::SCard(const Slice& key, int32_t* ret, std::string&& meta) { *ret = 0; - std::string meta_value; - - BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + std::string meta_value(std::move(meta)); + rocksdb::Status s; + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } + if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { @@ -195,7 +204,10 @@ rocksdb::Status Redis::SDiff(const std::vector& keys, std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -214,7 +226,10 @@ rocksdb::Status Redis::SDiff(const std::vector& keys, std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[0] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -280,7 +295,10 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -300,7 +318,10 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[0] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -346,7 +367,10 @@ rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -401,7 +425,10 @@ rocksdb::Status Redis::SInter(const std::vector& keys, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + keys[idx] + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -424,7 +451,10 @@ rocksdb::Status Redis::SInter(const std::vector& keys, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + keys[0] + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[0] + ", expect 
type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -496,7 +526,10 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -523,7 +556,10 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[0] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -575,7 +611,10 @@ rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -624,7 +663,10 @@ rocksdb::Status Redis::SIsmember(const Slice& key, const Slice& member, int32_t* if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", 
get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -661,14 +703,20 @@ rocksdb::Status Redis::SMembers(const Slice& key, std::vector* memb if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -709,7 +757,10 @@ Status Redis::SMembersWithTTL(const Slice& key, if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -768,7 +819,10 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons if 
(ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + source.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + source.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -811,7 +865,10 @@ rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, cons if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + destination.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -872,7 +929,10 @@ rocksdb::Status Redis::SPop(const Slice& key, std::vector* members, if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -986,7 +1046,10 @@ rocksdb::Status Redis::SRandmember(const Slice& key, int32_t count, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " 
+ key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1057,7 +1120,10 @@ rocksdb::Status Redis::SRem(const Slice& key, const std::vector& me if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1121,7 +1187,10 @@ rocksdb::Status Redis::SUnion(const std::vector& keys, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1178,7 +1247,10 @@ rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1217,7 +1289,10 @@ rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vector(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1274,7 +1349,10 @@ rocksdb::Status Redis::SScan(const Slice& key, int64_t cursor, const std::string if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1329,17 +1407,25 @@ rocksdb::Status Redis::SScan(const Slice& key, int64_t cursor, const std::string return rocksdb::Status::OK(); } -rocksdb::Status Redis::SetsExpire(const Slice& key, int64_t ttl) { - std::string meta_value; +rocksdb::Status Redis::SetsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get 
type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + rocksdb::Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1361,17 +1447,25 @@ rocksdb::Status Redis::SetsExpire(const Slice& key, int64_t ttl) { return s; } -rocksdb::Status Redis::SetsDel(const Slice& key) { - std::string meta_value; +rocksdb::Status Redis::SetsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - + rocksdb::Status s; BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: 
" + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1390,17 +1484,25 @@ rocksdb::Status Redis::SetsDel(const Slice& key) { return s; } -rocksdb::Status Redis::SetsExpireat(const Slice& key, int64_t timestamp) { - std::string meta_value; +rocksdb::Status Redis::SetsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1421,17 +1523,25 @@ rocksdb::Status Redis::SetsExpireat(const Slice& key, int64_t timestamp) { return s; } -rocksdb::Status Redis::SetsPersist(const Slice& key) { - std::string meta_value; +rocksdb::Status Redis::SetsPersist(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); 
ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + rocksdb::Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], BaseMetaKey(key).Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1453,16 +1563,24 @@ rocksdb::Status Redis::SetsPersist(const Slice& key) { return s; } -rocksdb::Status Redis::SetsTTL(const Slice& key, int64_t* timestamp) { - std::string meta_value; - +rocksdb::Status Redis::SetsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); BaseMetaKey base_meta_key(key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kSets)] + "get type: " +
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + rocksdb::Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { diff --git a/src/storage/src/redis_streams.cc b/src/storage/src/redis_streams.cc index 47942244c5..606fb99c05 100644 --- a/src/storage/src/redis_streams.cc +++ b/src/storage/src/redis_streams.cc @@ -171,11 +171,11 @@ Status Redis::XDel(const Slice& key, const std::vector& ids, int32_t& return s; } } - + return db_->Put(default_write_options_, handles_[kMetaCF], BaseMetaKey(key).Encode(), stream_meta.value()); } -Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector& field_values) { +Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector& field_values, std::string&& prefetch_meta) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -184,7 +184,7 @@ Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vectorGet(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kStreams, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStreams)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // value is empty 
means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kStreams, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kStreams)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -387,15 +396,24 @@ Status Redis::StreamsDel(const Slice& key) { } Status Redis::GetStreamMeta(StreamMetaValue& stream_meta, const rocksdb::Slice& key, - rocksdb::ReadOptions& read_options) { - std::string value; + rocksdb::ReadOptions& read_options, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); BaseMetaKey base_meta_key(key); - auto s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStreams, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStreams)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStreams, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kStreams)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { diff --git a/src/storage/src/redis_strings.cc 
b/src/storage/src/redis_strings.cc index cab41de9aa..007b92f05a 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -74,7 +74,10 @@ Status Redis::Append(const Slice& key, const Slice& value, int32_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -127,7 +130,10 @@ Status Redis::BitCount(const Slice& key, int64_t start_offset, int64_t end_offse if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -230,7 +236,10 @@ Status Redis::BitOp(BitOpType op, const std::string& dest_key, const std::vector if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + dest_key + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + dest_key + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -273,7 
+282,10 @@ Status Redis::Decrby(const Slice& key, int64_t value, int64_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -321,7 +333,10 @@ Status Redis::Get(const Slice& key, std::string* value) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -390,7 +405,10 @@ Status Redis::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + " get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + " get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } @@ -435,7 +453,10 @@ Status Redis::GetBit(const Slice& key, int64_t offset, int32_t* ret) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return 
Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -470,7 +491,10 @@ Status Redis::Getrange(const Slice& key, int64_t start_offset, int64_t end_offse if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -512,7 +536,10 @@ Status Redis::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -571,7 +598,10 @@ Status Redis::GetSet(const Slice& key, const Slice& value, std::string* old_valu if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + 
DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -600,7 +630,10 @@ Status Redis::Incrby(const Slice& key, int64_t value, int64_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -652,7 +685,10 @@ Status Redis::Incrbyfloat(const Slice& key, const Slice& value, std::string* ret if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -714,20 +750,14 @@ Status Redis::MSetnx(const std::vector& kvs, int32_t* ret) { for (const auto & kv : kvs) { BaseKey base_key(kv.key); s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, 
key: " + kv.key + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); - } + if (!s.ok() && !s.IsNotFound()) { + return s; } - if (s.ok()) { - ParsedStringsValue parsed_strings_value(&value); - if (!parsed_strings_value.IsStale()) { - exists = true; - break; - } + if (s.ok() && !ExpectedStale(value)) { + exists = true; + break; } + // when reaches here, either s is not found or s is ok but expired } if (!exists) { s = MSet(kvs); @@ -758,7 +788,10 @@ Status Redis::Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -795,7 +828,10 @@ Status Redis::SetBit(const Slice& key, int64_t offset, int32_t on, int32_t* ret) if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok() || s.IsNotFound()) { @@ -860,34 +896,22 @@ Status Redis::Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); Status s = 
db_->Get(default_read_options_, base_key.Encode(), &old_value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) { - if (ExpectedStale(old_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); - } + if (!s.ok() && !s.IsNotFound()) { + return s; + } + if (s.ok() && !ExpectedStale(old_value)) { + return s; + } + // when reaches here, either s is not found or s is ok but expired + s = Status::NotFound(); + + StringsValue strings_value(value); + if (ttl > 0) { + strings_value.SetRelativeTimestamp(ttl); } + s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); if (s.ok()) { - ParsedStringsValue parsed_strings_value(&old_value); - if (parsed_strings_value.IsStale()) { - StringsValue strings_value(value); - if (ttl > 0) { - strings_value.SetRelativeTimestamp(ttl); - } - s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); - if (s.ok()) { - *ret = 1; - } - } - } else if (s.IsNotFound()) { - StringsValue strings_value(value); - if (ttl > 0) { - strings_value.SetRelativeTimestamp(ttl); - } - s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); - if (s.ok()) { - *ret = 1; - } + *ret = 1; } return s; } @@ -904,7 +928,10 @@ Status Redis::Setvx(const Slice& key, const Slice& value, const Slice& new_value if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + 
DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -945,7 +972,10 @@ Status Redis::Delvx(const Slice& key, const Slice& value, int32_t* ret) { if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -981,7 +1011,10 @@ Status Redis::Setrange(const Slice& key, int64_t start_offset, const Slice& valu if (ExpectedStale(old_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); } } if (s.ok()) { @@ -1087,7 +1120,10 @@ Status Redis::BitPos(const Slice& key, int32_t bit, int64_t* ret) { if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1131,7 +1167,10 @@ Status Redis::BitPos(const Slice& key, 
int32_t bit, int64_t start_offset, int64_ if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1188,7 +1227,10 @@ Status Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_ if (ExpectedStale(value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); } } if (s.ok()) { @@ -1254,17 +1296,25 @@ Status Redis::PKSetexAt(const Slice& key, const Slice& value, int64_t timestamp) return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } -Status Redis::StringsExpire(const Slice& key, int64_t ttl) { - std::string value; +Status Redis::StringsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get 
type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1282,17 +1332,25 @@ Status Redis::StringsExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::StringsDel(const Slice& key) { - std::string value; +Status Redis::StringsDel(const Slice& key, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseKey base_key(key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1305,17 +1363,25 @@ Status 
Redis::StringsDel(const Slice& key) { return s; } -Status Redis::StringsExpireat(const Slice& key, int64_t timestamp) { - std::string value; +Status Redis::StringsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseKey base_key(key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1334,17 +1400,25 @@ Status Redis::StringsExpireat(const Slice& key, int64_t timestamp) { return s; } -Status Redis::StringsPersist(const Slice& key) { - std::string value; +Status Redis::StringsPersist(const Slice& key, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseKey base_key(key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + 
DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1364,17 +1438,25 @@ Status Redis::StringsPersist(const Slice& key) { return s; } -Status Redis::StringsTTL(const Slice& key, int64_t* timestamp) { - std::string value; +Status Redis::StringsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseKey base_key(key); - Status s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { - if (ExpectedStale(value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(value))]); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + 
DataTypeStrings[static_cast(GetMetaValueType(value))]); + } } } if (s.ok()) { @@ -1430,7 +1512,6 @@ void Redis::ScanStrings() { rocksdb::Status Redis::Exists(const Slice& key) { std::string meta_value; uint64_t llen = 0; - std::string value; int32_t ret = 0; BaseMetaKey base_meta_key(key); std::vector id_messages; @@ -1442,17 +1523,17 @@ rocksdb::Status Redis::Exists(const Slice& key) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SCard(key, &ret); + return SCard(key, &ret, std::move(meta_value)); case DataType::kZSets: - return ZCard(key, &ret); + return ZCard(key, &ret, std::move(meta_value)); case DataType::kHashes: - return HLen(key, &ret); + return HLen(key, &ret, std::move(meta_value)); case DataType::kLists: - return LLen(key, &llen); - case DataType::kStrings: - return Get(key, &value); + return LLen(key, &llen, std::move(meta_value)); case DataType::kStreams: - return XRange(key, arg, id_messages); + return XRange(key, arg, id_messages, std::move(meta_value)); + case DataType::kStrings: + return ExpectedStale(meta_value) ? 
rocksdb::Status::NotFound() : rocksdb::Status::OK(); default: return rocksdb::Status::NotFound(); } @@ -1468,17 +1549,17 @@ rocksdb::Status Redis::Del(const Slice& key) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsDel(key); + return SetsDel(key, std::move(meta_value)); case DataType::kZSets: - return ZsetsDel(key); + return ZsetsDel(key, std::move(meta_value)); case DataType::kHashes: - return HashesDel(key); + return HashesDel(key, std::move(meta_value)); case DataType::kLists: - return ListsDel(key); + return ListsDel(key, std::move(meta_value)); case DataType::kStrings: - return StringsDel(key); + return StringsDel(key, std::move(meta_value)); case DataType::kStreams: - return StreamsDel(key); + return StreamsDel(key, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1494,15 +1575,15 @@ rocksdb::Status Redis::Expire(const Slice& key, int64_t ttl) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsExpire(key, ttl); + return SetsExpire(key, ttl, std::move(meta_value)); case DataType::kZSets: - return ZsetsExpire(key, ttl); + return ZsetsExpire(key, ttl, std::move(meta_value)); case DataType::kHashes: - return HashesExpire(key, ttl); + return HashesExpire(key, ttl, std::move(meta_value)); case DataType::kLists: - return ListsExpire(key, ttl); + return ListsExpire(key, ttl, std::move(meta_value)); case DataType::kStrings: - return StringsExpire(key, ttl); + return StringsExpire(key, ttl, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1518,15 +1599,15 @@ rocksdb::Status Redis::Expireat(const Slice& key, int64_t ttl) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsExpireat(key, ttl); + return SetsExpireat(key, ttl, std::move(meta_value)); case DataType::kZSets: - return ZsetsExpireat(key, ttl); + return ZsetsExpireat(key, ttl, 
std::move(meta_value)); case DataType::kHashes: - return HashesExpireat(key, ttl); + return HashesExpireat(key, ttl, std::move(meta_value)); case DataType::kLists: - return ListsExpireat(key, ttl); + return ListsExpireat(key, ttl, std::move(meta_value)); case DataType::kStrings: - return StringsExpireat(key, ttl); + return StringsExpireat(key, ttl, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1542,15 +1623,15 @@ rocksdb::Status Redis::Persist(const Slice& key) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsPersist(key); + return SetsPersist(key, std::move(meta_value)); case DataType::kZSets: - return ZsetsPersist(key); + return ZsetsPersist(key, std::move(meta_value)); case DataType::kHashes: - return HashesPersist(key); + return HashesPersist(key, std::move(meta_value)); case DataType::kLists: - return ListsPersist(key); + return ListsPersist(key, std::move(meta_value)); case DataType::kStrings: - return StringsPersist(key); + return StringsPersist(key, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1566,15 +1647,15 @@ rocksdb::Status Redis::TTL(const Slice& key, int64_t* timestamp) { auto type = static_cast(static_cast(meta_value[0])); switch (type) { case DataType::kSets: - return SetsTTL(key, timestamp); + return SetsTTL(key, timestamp, std::move(meta_value)); case DataType::kZSets: - return ZsetsTTL(key, timestamp); + return ZsetsTTL(key, timestamp, std::move(meta_value)); case DataType::kHashes: - return HashesTTL(key, timestamp); + return HashesTTL(key, timestamp, std::move(meta_value)); case DataType::kLists: - return ListsTTL(key, timestamp); + return ListsTTL(key, timestamp, std::move(meta_value)); case DataType::kStrings: - return StringsTTL(key, timestamp); + return StringsTTL(key, timestamp, std::move(meta_value)); default: return rocksdb::Status::NotFound(); } @@ -1597,6 +1678,9 @@ rocksdb::Status Redis::IsExist(const storage::Slice& 
key) { BaseMetaKey base_meta_key(key); rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { + if (ExpectedStale(meta_value)) { + return Status::NotFound(); + } return Status::OK(); } return rocksdb::Status::NotFound(); diff --git a/src/storage/src/redis_zsets.cc b/src/storage/src/redis_zsets.cc index 503d3710dc..ce89afe885 100644 --- a/src/storage/src/redis_zsets.cc +++ b/src/storage/src/redis_zsets.cc @@ -77,7 +77,10 @@ Status Redis::ZPopMax(const Slice& key, const int64_t count, std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -132,7 +135,10 @@ Status Redis::ZPopMin(const Slice& key, const int64_t count, std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -198,7 +204,10 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -284,20 +293,28 @@ Status 
Redis::ZAdd(const Slice& key, const std::vector& score_membe return s; } -Status Redis::ZCard(const Slice& key, int32_t* card) { +Status Redis::ZCard(const Slice& key, int32_t* card, std::string&& prefetch_meta) { *card = 0; - std::string meta_value; - + Status s; - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + // meta_value is empty means no meta value get before, + // we should get meta first + std::string meta_value(std::move(prefetch_meta)); + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } + if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { @@ -329,7 +346,10 @@ Status Redis::ZCount(const Slice& key, double min, double max, bool left_close, if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() 
+ ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -394,7 +414,10 @@ Status Redis::ZIncrby(const Slice& key, const Slice& member, double increment, d if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -470,7 +493,10 @@ Status Redis::ZRange(const Slice& key, int32_t start, int32_t stop, std::vector< if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -525,7 +551,10 @@ Status Redis::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std:: if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + 
DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -594,7 +623,10 @@ Status Redis::ZRangebyscore(const Slice& key, double min, double max, bool left_ if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -668,7 +700,10 @@ Status Redis::ZRank(const Slice& key, const Slice& member, int32_t* rank) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -727,7 +762,10 @@ Status Redis::ZRem(const Slice& key, const std::vector& members, in if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -787,7 +825,10 @@ Status 
Redis::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -852,7 +893,10 @@ Status Redis::ZRemrangebyscore(const Slice& key, double min, double max, bool le if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -931,7 +975,10 @@ Status Redis::ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vect if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -986,7 +1033,10 @@ Status Redis::ZRevrangebyscore(const Slice& key, double min, double max, bool le if (ExpectedStale(meta_value)) { s = 
Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1060,7 +1110,10 @@ Status Redis::ZRevrank(const Slice& key, const Slice& member, int32_t* rank) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1111,7 +1164,10 @@ Status Redis::ZScore(const Slice& key, const Slice& member, double* score) { if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1155,7 +1211,10 @@ Status Redis::ZGetAll(const Slice& key, double weight, std::map(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + 
key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1204,7 +1263,10 @@ Status Redis::ZUnionstore(const Slice& destination, const std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1255,7 +1317,10 @@ Status Redis::ZUnionstore(const Slice& destination, const std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1328,7 +1393,10 @@ Status Redis::ZInterstore(const Slice& destination, const std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1405,7 +1473,10 @@ Status Redis::ZInterstore(const Slice& destination, const std::vector(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1464,7 +1535,10 @@ Status Redis::ZRangebylex(const Slice& 
key, const Slice& min, const Slice& max, if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1534,7 +1608,10 @@ Status Redis::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& ma if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1593,17 +1670,25 @@ Status Redis::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& ma return s; } -Status Redis::ZsetsExpire(const Slice& key, int64_t ttl) { - std::string meta_value; +Status Redis::ZsetsExpire(const Slice& key, int64_t ttl, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get 
type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1624,17 +1709,25 @@ Status Redis::ZsetsExpire(const Slice& key, int64_t ttl) { return s; } -Status Redis::ZsetsDel(const Slice& key) { - std::string meta_value; +Status Redis::ZsetsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + 
DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1653,17 +1746,25 @@ Status Redis::ZsetsDel(const Slice& key) { return s; } -Status Redis::ZsetsExpireat(const Slice& key, int64_t timestamp) { - std::string meta_value; +Status Redis::ZsetsExpireat(const Slice& key, int64_t timestamp, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); ScopeRecordLock l(lock_mgr_, key); - BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1708,7 +1809,10 @@ Status Redis::ZScan(const Slice& key, int64_t cursor, const std::string& pattern if (ExpectedStale(meta_value)) { s = Status::NotFound(); } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); 
+ return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); } } if (s.ok()) { @@ -1767,17 +1871,25 @@ Status Redis::ZScan(const Slice& key, int64_t cursor, const std::string& pattern return Status::OK(); } -Status Redis::ZsetsPersist(const Slice& key) { - std::string meta_value; +Status Redis::ZsetsPersist(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); BaseMetaKey base_meta_key(key); ScopeRecordLock l(lock_mgr_, key); + Status s; - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { @@ -1799,16 +1911,24 @@ Status Redis::ZsetsPersist(const Slice& key) { return s; } -Status Redis::ZsetsTTL(const Slice& key, int64_t* timestamp) { - std::string meta_value; - +Status Redis::ZsetsTTL(const Slice& key, int64_t* timestamp, std::string&& prefetch_meta) { + std::string 
meta_value(std::move(prefetch_meta)); BaseMetaKey base_meta_key(key); - Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { - if (ExpectedStale(meta_value)) { - s = Status::NotFound(); - } else { - return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + ", expect type: " + DataTypeStrings[static_cast(DataType::kZSets)] + "get type: " + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } } } if (s.ok()) { diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 53454cec53..6df8f6eacd 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -238,9 +238,8 @@ Status Storage::MSetnx(const std::vector& kvs, int32_t* ret) { Status s; for (const auto& kv : kvs) { auto& inst = GetDBInstance(kv.key); - std::string value; - s = inst->Get(Slice(kv.key), &value); - if (s.ok() || !s.IsNotFound()) { + s = inst->IsExist(Slice(kv.key)); + if (!s.IsNotFound()) { return s; } } From 3eb2e485be169a889b4f0d0d469f742323564d11 Mon Sep 17 00:00:00 2001 From: wangshao1 <30471730+wangshao1@users.noreply.github.com> Date: Fri, 21 Jun 2024 16:50:19 +0800 Subject: [PATCH 11/11] fix: some streams errors such as pkpatternmatchdel etc (#2726) * fix pkpatternmatchdel error --------- Co-authored-by: wangshaoyi --- src/storage/src/base_filter.h 
| 26 +- src/storage/src/pika_stream_meta_value.h | 15 +- src/storage/src/redis_strings.cc | 7 +- src/storage/src/storage.cc | 5 +- src/storage/tests/keys_test.cc | 1022 +++++++++++----------- 5 files changed, 548 insertions(+), 527 deletions(-) diff --git a/src/storage/src/base_filter.h b/src/storage/src/base_filter.h index 3a092c109e..5dd17b09c6 100644 --- a/src/storage/src/base_filter.h +++ b/src/storage/src/base_filter.h @@ -16,6 +16,7 @@ #include "src/base_value_format.h" #include "src/base_meta_value_format.h" #include "src/lists_meta_value_format.h" +#include "src/pika_stream_meta_value.h" #include "src/strings_value_format.h" #include "src/zsets_data_key_format.h" #include "src/debug.h" @@ -36,11 +37,12 @@ class BaseMetaFilter : public rocksdb::CompactionFilter { * The field designs of the remaining zset,set,hash and stream in meta-value * are the same, so the same filtering strategy is used */ + ParsedBaseKey parsed_key(key); auto type = static_cast(static_cast(value[0])); DEBUG("==========================START=========================="); if (type == DataType::kStrings) { ParsedStringsValue parsed_strings_value(value); - DEBUG("[StringsFilter] key: {}, value = {}, timestamp: {}, cur_time: {}", key.ToString().c_str(), + DEBUG("[string type] key: %s, value = %s, timestamp: %llu, cur_time: %llu", parsed_key.Key().ToString().c_str(), parsed_strings_value.UserValue().ToString().c_str(), parsed_strings_value.Etime(), cur_time); if (parsed_strings_value.Etime() != 0 && parsed_strings_value.Etime() < cur_time) { DEBUG("Drop[Stale]"); @@ -49,9 +51,17 @@ class BaseMetaFilter : public rocksdb::CompactionFilter { DEBUG("Reserve"); return false; } + } else if (type == DataType::kStreams) { + ParsedStreamMetaValue parsed_stream_meta_value(value); + DEBUG("[stream meta type], key: %s, entries_added = %llu, first_id: %s, last_id: %s, version: %llu", + parsed_key.Key().ToString().c_str(), parsed_stream_meta_value.entries_added(), + 
parsed_stream_meta_value.first_id().ToString().c_str(), + parsed_stream_meta_value.last_id().ToString().c_str(), + parsed_stream_meta_value.version()); + return false; } else if (type == DataType::kLists) { ParsedListsMetaValue parsed_lists_meta_value(value); - DEBUG("[ListMetaFilter], key: {}, count = {}, timestamp: {}, cur_time: {}, version: {}", key.ToString().c_str(), + DEBUG("[list meta type], key: %s, count = %d, timestamp: %llu, cur_time: %llu, version: %llu", parsed_key.Key().ToString().c_str(), parsed_lists_meta_value.Count(), parsed_lists_meta_value.Etime(), cur_time, parsed_lists_meta_value.Version()); @@ -68,8 +78,9 @@ class BaseMetaFilter : public rocksdb::CompactionFilter { return false; } else { ParsedBaseMetaValue parsed_base_meta_value(value); - DEBUG("[MetaFilter] key: {}, count = {}, timestamp: {}, cur_time: {}, version: {}", key.ToString().c_str(), - parsed_base_meta_value.Count(), parsed_base_meta_value.Etime(), cur_time, parsed_base_meta_value.Version()); + DEBUG("[%s meta type] key: %s, count = %d, timestamp: %llu, cur_time: %llu, version: %llu", + DataTypeToString(type), parsed_key.Key().ToString().c_str(), parsed_base_meta_value.Count(), + parsed_base_meta_value.Etime(), cur_time, parsed_base_meta_value.Version()); if (parsed_base_meta_value.Etime() != 0 && parsed_base_meta_value.Etime() < cur_time && parsed_base_meta_value.Version() < cur_time) { @@ -143,7 +154,12 @@ class BaseDataFilter : public rocksdb::CompactionFilter { auto type = static_cast(static_cast(meta_value[0])); if (type != type_) { return true; - } else if (type == DataType::kHashes || type == DataType::kSets || type == DataType::kStreams || type == DataType::kZSets) { + } else if (type == DataType::kStreams) { + ParsedStreamMetaValue parsed_stream_meta_value(meta_value); + meta_not_found_ = false; + cur_meta_version_ = parsed_stream_meta_value.version(); + cur_meta_etime_ = 0; // stream do not support ttl + } else if (type == DataType::kHashes || type == DataType::kSets || 
type == DataType::kZSets) { ParsedBaseMetaValue parsed_base_meta_value(&meta_value); meta_not_found_ = false; cur_meta_version_ = parsed_base_meta_value.Version(); diff --git a/src/storage/src/pika_stream_meta_value.h b/src/storage/src/pika_stream_meta_value.h index e010d5c830..d505eb9094 100644 --- a/src/storage/src/pika_stream_meta_value.h +++ b/src/storage/src/pika_stream_meta_value.h @@ -82,7 +82,8 @@ class StreamMetaValue { value_ = std::move(value); assert(value_.size() == kDefaultStreamValueLength); if (value_.size() != kDefaultStreamValueLength) { - LOG(ERROR) << "Invalid stream meta value length: "; + LOG(ERROR) << "Invalid stream meta value length: " << value_.size() + << " expected: " << kDefaultStreamValueLength; return; } char* pos = &value_[0]; @@ -215,7 +216,8 @@ class ParsedStreamMetaValue { ParsedStreamMetaValue(const Slice& value) { assert(value.size() == kDefaultStreamValueLength); if (value.size() != kDefaultStreamValueLength) { - LOG(ERROR) << "Invalid stream meta value length: "; + LOG(ERROR) << "Invalid stream meta value length: " << value.size() + << " expected: " << kDefaultStreamValueLength; return; } char* pos = const_cast(value.data()); @@ -294,7 +296,7 @@ class StreamCGroupMetaValue { uint64_t needed = kDefaultStreamCGroupValueLength; assert(value_.size() == 0); if (value_.size() != 0) { - LOG(FATAL) << "Init on a existed stream cgroup meta value!"; + LOG(ERROR) << "Init on a existed stream cgroup meta value!"; return; } value_.resize(needed); @@ -314,7 +316,8 @@ class StreamCGroupMetaValue { value_ = std::move(value); assert(value_.size() == kDefaultStreamCGroupValueLength); if (value_.size() != kDefaultStreamCGroupValueLength) { - LOG(FATAL) << "Invalid stream cgroup meta value length: "; + LOG(ERROR) << "Invalid stream cgroup meta value length: " << value_.size() + << " expected: " << kDefaultStreamValueLength; return; } if (value_.size() == kDefaultStreamCGroupValueLength) { @@ -373,7 +376,7 @@ class StreamConsumerMetaValue { value_ 
= std::move(value); assert(value_.size() == kDefaultStreamConsumerValueLength); if (value_.size() != kDefaultStreamConsumerValueLength) { - LOG(FATAL) << "Invalid stream consumer meta value length: " << value_.size() + LOG(ERROR) << "Invalid stream consumer meta value length: " << value_.size() << " expected: " << kDefaultStreamConsumerValueLength; return; } @@ -391,7 +394,7 @@ class StreamConsumerMetaValue { pel_ = pel; assert(value_.size() == 0); if (value_.size() != 0) { - LOG(FATAL) << "Invalid stream consumer meta value length: " << value_.size() << " expected: 0"; + LOG(ERROR) << "Invalid stream consumer meta value length: " << value_.size() << " expected: 0"; return; } uint64_t needed = kDefaultStreamConsumerValueLength; diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc index 007b92f05a..970695bf4b 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -1703,19 +1703,19 @@ rocksdb::Status Redis::PKPatternMatchDel(const std::string& pattern, int32_t* re rocksdb::WriteBatch batch; rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); iter->SeekToFirst(); - key = iter->key().ToString(); while (iter->Valid()) { auto meta_type = static_cast(static_cast(iter->value()[0])); ParsedBaseMetaKey parsed_meta_key(iter->key().ToString()); + key = iter->key().ToString(); + meta_value = iter->value().ToString(); + if (meta_type == DataType::kStrings) { - meta_value = iter->value().ToString(); ParsedStringsValue parsed_strings_value(&meta_value); if (!parsed_strings_value.IsStale() && (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { batch.Delete(key); } } else if (meta_type == DataType::kLists) { - meta_value = iter->value().ToString(); ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (!parsed_lists_meta_value.IsStale() && (parsed_lists_meta_value.Count() != 0U) && (StringMatch(pattern.data(), 
pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != @@ -1732,7 +1732,6 @@ rocksdb::Status Redis::PKPatternMatchDel(const std::string& pattern, int32_t* re batch.Put(handles_[kMetaCF], key, stream_meta_value.value()); } } else { - meta_value = iter->value().ToString(); ParsedBaseMetaValue parsed_meta_value(&meta_value); if (!parsed_meta_value.IsStale() && (parsed_meta_value.Count() != 0) && (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 6df8f6eacd..ddeac6dd37 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -1401,11 +1401,14 @@ Status Storage::PKRScanRange(const DataType& data_type, const Slice& key_start, Status Storage::PKPatternMatchDel(const DataType& data_type, const std::string& pattern, int32_t* ret) { Status s; + *ret = 0; for (const auto& inst : insts_) { - s = inst->PKPatternMatchDel(pattern, ret); + int32_t tmp_ret = 0; + s = inst->PKPatternMatchDel(pattern, &tmp_ret); if (!s.ok()) { return s; } + *ret += tmp_ret; } return s; } diff --git a/src/storage/tests/keys_test.cc b/src/storage/tests/keys_test.cc index e7872c713b..4609da95f2 100644 --- a/src/storage/tests/keys_test.cc +++ b/src/storage/tests/keys_test.cc @@ -2095,517 +2095,517 @@ for (const auto& kv : kvs) { db.Compact(DataType::kAll, true); } -// TEST_F(KeysTest, PKPatternMatchDel) { -// int32_t ret; -// uint64_t ret64; -// int32_t delete_count; -// std::vector keys; -// std::map type_status; - -// //=============================== Strings =============================== - -// // ***************** Group 1 Test ***************** -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY5", 
"VALUE"); -// db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE"); -// s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); -// db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE"); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY5")); -// s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY1_0xxx0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY3_0xxx0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY5_0xxx0", "VALUE"); -// db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); -// s = db.PKPatternMatchDel(DataType::kStrings, "*0xxx0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ(keys[0], "GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0"); -// ASSERT_EQ(keys[1], "GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0"); -// ASSERT_EQ(keys[2], 
"GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0"); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); -// db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); -// ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY5")); -// s = db.PKPatternMatchDel(DataType::kStrings, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// size_t gp5_total_kv = 23333; -// for (size_t idx = 0; idx < gp5_total_kv; ++idx) { -// db.Set("GP5_PKPATTERNMATCHDEL_STRING_KEY" + std::to_string(idx), "VALUE"); -// } -// s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp5_total_kv); -// keys.clear(); -// db.Keys(DataType::kStrings, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// //=============================== Set =============================== - -// // ***************** Group 1 Test ***************** -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); -// db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); -// s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); -// 
ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); -// db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY5")); -// s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY2_0ooo0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY4_0ooo0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", {"M1"}, &ret); -// db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY6_0ooo0", {"M1"}, &ret); -// s = db.PKPatternMatchDel(DataType::kSets, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", keys[0]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", keys[1]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", keys[2]); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// 
db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); -// db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); -// db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); -// db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); -// db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); -// s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY5_0ooo0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY7_0ooo0", {"M1"}, &ret); -// db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0", {"M1"}, &ret); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0")); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0")); -// db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); -// db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); -// s = db.PKPatternMatchDel(DataType::kSets, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 2); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 2); -// ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0"); -// ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0"); -// 
type_status.clear(); -// db.Del(keys); - -// // ***************** Group 6 Test ***************** -// size_t gp6_total_set = 23333; -// for (size_t idx = 0; idx < gp6_total_set; ++idx) { -// db.SAdd("GP6_PKPATTERNMATCHDEL_SET_KEY" + std::to_string(idx), {"M1"}, &ret); -// } -// s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp6_total_set); -// keys.clear(); -// db.Keys(DataType::kSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// //=============================== Hashes =============================== - -// // ***************** Group 1 Test ***************** -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); -// db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); -// s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); -// db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY5")); -// s = 
db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY2_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY4_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY6_0ooo0", "FIELD", "VALUE", &ret); -// s = db.PKPatternMatchDel(DataType::kHashes, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", keys[0]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", keys[1]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", keys[2]); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); -// db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); -// db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY1", {"FIELD"}, &ret); -// db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY3", {"FIELD"}, &ret); -// db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY5", {"FIELD"}, &ret); -// s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// 
ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY5_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY7_0ooo0", "FIELD", "VALUE", &ret); -// db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0", "FIELD", "VALUE", &ret); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0")); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0")); -// db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", {"FIELD"}, &ret); -// db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", {"FIELD"}, &ret); -// s = db.PKPatternMatchDel(DataType::kHashes, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 2); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 2); -// ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0"); -// ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0"); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 6 Test ***************** -// size_t gp6_total_hash = 23333; -// for (size_t idx = 0; idx < gp6_total_hash; ++idx) { -// db.HSet("GP6_PKPATTERNMATCHDEL_HASH_KEY" + std::to_string(idx), "FIELD", "VALUE", &ret); -// } -// s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp6_total_hash); -// keys.clear(); -// db.Keys(DataType::kHashes, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// 
//=============================== ZSets =============================== - -// // ***************** Group 1 Test ***************** -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); -// db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); -// s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); -// db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY5")); -// s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY2_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY4_0ooo0", {{1, "M"}}, &ret); -// 
db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY6_0ooo0", {{1, "M"}}, &ret); -// s = db.PKPatternMatchDel(DataType::kZSets, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", keys[0]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", keys[1]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", keys[2]); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); -// db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); -// db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {"M"}, &ret); -// db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {"M"}, &ret); -// db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {"M"}, &ret); -// s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY5_0ooo0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0", {{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY7_0ooo0", 
{{1, "M"}}, &ret); -// db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0", {{1, "M"}}, &ret); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0")); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0")); -// db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {"M"}, &ret); -// db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {"M"}, &ret); -// s = db.PKPatternMatchDel(DataType::kZSets, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 2); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 2); -// ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0"); -// ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0"); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 6 Test ***************** -// size_t gp6_total_zset = 23333; -// for (size_t idx = 0; idx < gp6_total_zset; ++idx) { -// db.ZAdd("GP6_PKPATTERNMATCHDEL_ZSET_KEY" + std::to_string(idx), {{1, "M"}}, &ret); -// } -// s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp6_total_zset); -// keys.clear(); -// db.Keys(DataType::kZSets, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// //=============================== List =============================== - -// // ***************** Group 1 Test ***************** -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); -// db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); -// s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 6); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// 
ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 2 Test ***************** -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); -// db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY1")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY3")); -// ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY5")); -// s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 3 Test ***************** -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY2_0ooo0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY4_0ooo0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY6_0ooo0", {"VALUE"}, &ret64); -// s = db.PKPatternMatchDel(DataType::kLists, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// ASSERT_EQ(keys.size(), 3); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", keys[0]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", keys[1]); -// ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", keys[2]); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 4 Test ***************** -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY1", 
{"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); -// db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); -// db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY1", 1, "VALUE", &ret64); -// db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY3", 1, "VALUE", &ret64); -// db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY5", 1, "VALUE", &ret64); -// s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 3); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// // ***************** Group 5 Test ***************** -// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0", {"VALUE"}, &ret64); -// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", {"VALUE"}, &ret64); -// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY5_0ooo0", {"VALUE"}, &ret64); -// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0", {"VALUE"}, &ret64); -// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY7_0ooo0", {"VALUE"}, &ret64); -// db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0", {"VALUE"}, &ret64); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0")); -// ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0")); -// db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", 1, "VALUE", &ret64); -// db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", 1, "VALUE", &ret64); -// s = db.PKPatternMatchDel(DataType::kLists, "*0ooo0", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, 2); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// ASSERT_EQ(keys.size(), 2); -// ASSERT_EQ(keys[0], 
"GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0"); -// ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0"); -// type_status.clear(); -// db.Del(keys); - -// // ***************** Group 6 Test ***************** -// size_t gp6_total_list = 23333; -// for (size_t idx = 0; idx < gp6_total_list; ++idx) { -// db.LPush("GP6_PKPATTERNMATCHDEL_LIST_KEY" + std::to_string(idx), {"VALUE"}, &ret64); -// } -// s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); -// ASSERT_TRUE(s.ok()); -// ASSERT_EQ(delete_count, gp6_total_hash); -// keys.clear(); -// db.Keys(DataType::kLists, "*", &keys); -// ASSERT_EQ(keys.size(), 0); - -// sleep(2); -// db.Compact(DataType::kAll, true); -// } +TEST_F(KeysTest, PKPatternMatchDel) { + int32_t ret; + uint64_t ret64; + int32_t delete_count = 0; + std::vector keys; + std::map type_status; + + //=============================== Strings =============================== + + // ***************** Group 1 Test ***************** + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE"); + s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE"); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY1")); + 
ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY5")); + s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY1_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY3_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY5_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); + s = db.PKPatternMatchDel(DataType::kStrings, "*0xxx0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(keys[0], "GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0"); + ASSERT_EQ(keys[1], "GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0"); + ASSERT_EQ(keys[2], "GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY5")); + s = db.PKPatternMatchDel(DataType::kStrings, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + 
db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + size_t gp5_total_kv = 23333; + for (size_t idx = 0; idx < gp5_total_kv; ++idx) { + db.Set("GP5_PKPATTERNMATCHDEL_STRING_KEY" + std::to_string(idx), "VALUE"); + } + s = db.PKPatternMatchDel(DataType::kStrings, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp5_total_kv); + keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + //=============================== Set =============================== + + // ***************** Group 1 Test ***************** + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY5")); + s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + 
ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY2_0ooo0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY4_0ooo0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY6_0ooo0", {"M1"}, &ret); + s = db.PKPatternMatchDel(DataType::kSets, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", 
{"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY5_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY7_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0", {"M1"}, &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0")); + db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); + db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); + s = db.PKPatternMatchDel(DataType::kSets, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_set = 23333; + for (size_t idx = 0; idx < gp6_total_set; ++idx) { + db.SAdd("GP6_PKPATTERNMATCHDEL_SET_KEY" + std::to_string(idx), {"M1"}, &ret); + } + s = db.PKPatternMatchDel(DataType::kSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp6_total_set); + keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + //=============================== Hashes =============================== + + // ***************** Group 1 Test ***************** + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); + 
ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY5")); + s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY2_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY4_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY6_0ooo0", "FIELD", "VALUE", &ret); + s = db.PKPatternMatchDel(DataType::kHashes, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // 
***************** Group 4 Test ***************** + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY1", {"FIELD"}, &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY3", {"FIELD"}, &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY5", {"FIELD"}, &ret); + s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY5_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY7_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0", "FIELD", "VALUE", &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0")); + db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", {"FIELD"}, &ret); + db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", {"FIELD"}, &ret); + s = db.PKPatternMatchDel(DataType::kHashes, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); 
+ ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_hash = 23333; + for (size_t idx = 0; idx < gp6_total_hash; ++idx) { + db.HSet("GP6_PKPATTERNMATCHDEL_HASH_KEY" + std::to_string(idx), "FIELD", "VALUE", &ret); + } + s = db.PKPatternMatchDel(DataType::kHashes, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp6_total_hash); + keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + //=============================== ZSets =============================== + + // ***************** Group 1 Test ***************** + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY3")); + ASSERT_TRUE(make_expired(&db, 
"GP2_PKPATTERNMATCHDEL_ZSET_KEY5")); + s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY2_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY4_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY6_0ooo0", {{1, "M"}}, &ret); + s = db.PKPatternMatchDel(DataType::kZSets, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {"M"}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {"M"}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {"M"}, &ret); + s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // 
***************** Group 5 Test ***************** + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY5_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY7_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0", {{1, "M"}}, &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0")); + db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {"M"}, &ret); + db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {"M"}, &ret); + s = db.PKPatternMatchDel(DataType::kZSets, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_zset = 23333; + for (size_t idx = 0; idx < gp6_total_zset; ++idx) { + db.ZAdd("GP6_PKPATTERNMATCHDEL_ZSET_KEY" + std::to_string(idx), {{1, "M"}}, &ret); + } + s = db.PKPatternMatchDel(DataType::kZSets, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp6_total_zset); + keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + //=============================== List =============================== + + // ***************** Group 1 Test ***************** + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + 
db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY5")); + s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY2_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY4_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY6_0ooo0", {"VALUE"}, &ret64); + s = db.PKPatternMatchDel(DataType::kLists, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); 
+ ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY1", 1, "VALUE", &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY3", 1, "VALUE", &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY5", 1, "VALUE", &ret64); + s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY5_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY7_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0", {"VALUE"}, &ret64); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0")); + db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", 1, "VALUE", &ret64); + 
db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", 1, "VALUE", &ret64); + s = db.PKPatternMatchDel(DataType::kLists, "*0ooo0", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_list = 23333; + for (size_t idx = 0; idx < gp6_total_list; ++idx) { + db.LPush("GP6_PKPATTERNMATCHDEL_LIST_KEY" + std::to_string(idx), {"VALUE"}, &ret64); + } + s = db.PKPatternMatchDel(DataType::kLists, "*", &delete_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, gp6_total_list); + keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + sleep(2); + db.Compact(DataType::kAll, true); +} // Scan // Note: This test needs to execute at first because all of the data is