diff --git a/src/NRedisStack/ResponseParser.cs b/src/NRedisStack/ResponseParser.cs index 54001dec..8a98bbcd 100644 --- a/src/NRedisStack/ResponseParser.cs +++ b/src/NRedisStack/ResponseParser.cs @@ -150,7 +150,7 @@ public static IReadOnlyList ToLabelArray(RedisResult result) return list; } - public static IReadOnlyList<(string key, IReadOnlyList labels, TimeSeriesTuple value)> ParseMGetesponse(RedisResult result) + public static IReadOnlyList<(string key, IReadOnlyList labels, TimeSeriesTuple value)> ParseMGetResponse(RedisResult result) { RedisResult[] redisResults = (RedisResult[])result; var list = new List<(string key, IReadOnlyList labels, TimeSeriesTuple values)>(redisResults.Length); @@ -361,7 +361,7 @@ public static IReadOnlyList ToRuleArray(RedisResult result) compression = capacity = mergedNodes = unmergedNodes = totalCompressions = -1; mergedWeight = unmergedWeight = -1.0; - + RedisResult[] redisResults = ToArray(result); for (int i = 0; i < redisResults.Length; ++i) @@ -402,10 +402,10 @@ public static IReadOnlyList ToRuleArray(RedisResult result) public static TimeSeriesInformation ToTimeSeriesInfo(RedisResult result) { long totalSamples = -1, memoryUsage = -1, retentionTime = -1, chunkSize = -1, chunkCount = -1; - TimeStamp firstTimestamp = null, lastTimestamp = null; - IReadOnlyList labels = null; - IReadOnlyList rules = null; - string sourceKey = null; + TimeStamp? firstTimestamp = null, lastTimestamp = null; + IReadOnlyList? labels = null; + IReadOnlyList? rules = null; + string? sourceKey = null; TsDuplicatePolicy? duplicatePolicy = null; RedisResult[] redisResults = (RedisResult[])result; for (int i = 0; i < redisResults.Length; ++i) @@ -459,7 +459,7 @@ public static TimeSeriesInformation ToTimeSeriesInfo(RedisResult result) lastTimestamp, retentionTime, chunkCount, chunkSize, labels, sourceKey, rules, duplicatePolicy); } - public static IReadOnlyList? ToStringArray(RedisResult result) + public static IReadOnlyList ToStringArray(RedisResult result) { RedisResult[] redisResults = ToArray(result); diff --git a/src/NRedisStack/TimeSeries/TimeSeriesAux.cs b/src/NRedisStack/TimeSeries/TimeSeriesAux.cs index 9073f2f2..32476e9c 100644 --- a/src/NRedisStack/TimeSeries/TimeSeriesAux.cs +++ b/src/NRedisStack/TimeSeries/TimeSeriesAux.cs @@ -193,11 +193,11 @@ public static List BuildTsCreateArgs(string key, long? retentionTime, IR return args; } - public static List BuildTsAlterArgs(string key, long? retentionTime, IReadOnlyCollection labels) + public static List BuildTsAlterArgs(string key, long? retentionTime, IReadOnlyCollection? labels) { var args = new List {key}; args.AddRetentionTime(retentionTime); - args.AddLabels(labels); + if (labels != null) args.AddLabels(labels); return args; } @@ -213,14 +213,14 @@ public static List BuildTsAddArgs(string key, TimeStamp timestamp, doubl return args; } - public static List BuildTsIncrDecrByArgs(string key, double value, TimeStamp timestamp, long? retentionTime, - IReadOnlyCollection labels, bool? uncompressed, long? chunkSizeBytes) + public static List BuildTsIncrDecrByArgs(string key, double value, TimeStamp? timestamp, long? retentionTime, + IReadOnlyCollection? labels, bool? uncompressed, long? 
chunkSizeBytes) { var args = new List {key, value}; - args.AddTimeStamp(timestamp); + if (timestamp != null) args.AddTimeStamp(timestamp); args.AddRetentionTime(retentionTime); args.AddChunkSize(chunkSizeBytes); - args.AddLabels(labels); + if (labels != null) args.AddLabels(labels); args.AddUncompressed(uncompressed); return args; } @@ -252,16 +252,23 @@ public static List BuildTsMgetArgs(IReadOnlyCollection filter, b return args; } - public static List BuildRangeArgs(string key, TimeStamp fromTimeStamp, TimeStamp toTimeStamp, long? count, - TsAggregation? aggregation, long? timeBucket, IReadOnlyCollection filterByTs, (long, long)? filterByValue, - TimeStamp align) + public static List BuildRangeArgs(string key, + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + long? count, + TsAggregation? aggregation, + long? timeBucket, + IReadOnlyCollection? filterByTs, + (long, long)? filterByValue, + TimeStamp? align) { var args = new List() {key, fromTimeStamp.Value, toTimeStamp.Value}; - args.AddFilterByTs(filterByTs); + + if (filterByTs != null) args.AddFilterByTs(filterByTs); args.AddFilterByValue(filterByValue); args.AddCount(count); - args.AddAlign(align); + if (align != null) args.AddAlign(align); args.AddAggregation(aggregation, timeBucket); return args; } @@ -269,16 +276,16 @@ public static List BuildRangeArgs(string key, TimeStamp fromTimeStamp, T public static List BuildMultiRangeArgs(TimeStamp fromTimeStamp, TimeStamp toTimeStamp, IReadOnlyCollection filter, long? count, TsAggregation? aggregation, long? timeBucket, - bool? withLabels, (string, TsReduce)? groupbyTuple, IReadOnlyCollection filterByTs, - (long, long)? filterByValue, IReadOnlyCollection selectLabels, TimeStamp align) + bool? withLabels, (string, TsReduce)? groupbyTuple, IReadOnlyCollection? filterByTs, + (long, long)? filterByValue, IReadOnlyCollection? selectLabels, TimeStamp? align) { var args = new List() {fromTimeStamp.Value, toTimeStamp.Value}; - args.AddFilterByTs(filterByTs); + if (filterByTs != null) args.AddFilterByTs(filterByTs); args.AddFilterByValue(filterByValue); args.AddCount(count); - args.AddAlign(align); + if (align != null) args.AddAlign(align); args.AddAggregation(aggregation, timeBucket); - args.AddWithLabels(withLabels, selectLabels); + if (selectLabels != null) args.AddWithLabels(withLabels, selectLabels); args.AddFilters(filter); args.AddGroupby(groupbyTuple); return args; diff --git a/src/NRedisStack/TimeSeries/TimeSeriesCommands.cs b/src/NRedisStack/TimeSeries/TimeSeriesCommands.cs index d15e641e..de99ac76 100644 --- a/src/NRedisStack/TimeSeries/TimeSeriesCommands.cs +++ b/src/NRedisStack/TimeSeries/TimeSeriesCommands.cs @@ -14,23 +14,632 @@ public TimeSeriesCommands(IDatabase db) _db = db; } + #region Create + + /// + /// Create a new time-series. + /// + /// Key name for timeseries + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// Optional: Adding this flag will keep data in an uncompressed form + /// Optional: Each time-series uses chunks of memory of fixed size for time series samples. + /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes) + /// Optinal: Define handling of duplicate samples behavior (avalible for RedisTimeseries >= 1.4) + /// If the operation executed successfully public bool Create(string key, long? retentionTime = null, IReadOnlyCollection labels = null, bool? uncompressed = null, long? 
chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null) { var args = TimeSeriesAux.BuildTsCreateArgs(key, retentionTime, labels, uncompressed, chunkSizeBytes, duplicatePolicy); return ResponseParser.OKtoBoolean(_db.Execute(TS.CREATE, args)); } - public TimeSeriesInformation Info(string key) + /// + /// Create a new time-series. + /// + /// Key name for timeseries + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// Optional: Adding this flag will keep data in an uncompressed form + /// Optional: Each time-series uses chunks of memory of fixed size for time series samples. + /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes) + /// Optinal: Define handling of duplicate samples behavior (avalible for RedisTimeseries >= 1.4) + /// If the operation executed successfully + public async Task CreateAsync(string key, long? retentionTime = null, IReadOnlyCollection labels = null, bool? uncompressed = null, long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null) { - return ResponseParser.ToTimeSeriesInfo(_db.Execute(TS.INFO, key)); + var args = TimeSeriesAux.BuildTsCreateArgs(key, retentionTime, labels, uncompressed, chunkSizeBytes, duplicatePolicy); + return ResponseParser.OKtoBoolean(await _db.ExecuteAsync(TS.CREATE, args)); } - public bool TimeSeriesAlter(string key, long? retentionTime = null, IReadOnlyCollection labels = null) + #endregion + + #region Update + + /// + /// Update the retention, labels of an existing key. + /// + /// Key name for timeseries + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// If the operation executed successfully + public bool Alter(string key, long? retentionTime = null, IReadOnlyCollection? labels = null) { var args = TimeSeriesAux.BuildTsAlterArgs(key, retentionTime, labels); return ResponseParser.OKtoBoolean(_db.Execute(TS.ALTER, args)); } + /// + /// Update the retention, labels of an existing key. + /// + /// Key name for timeseries + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// If the operation executed successfully + public async Task AlterAsync(string key, long? retentionTime = null, IReadOnlyCollection? labels = null) + { + var args = TimeSeriesAux.BuildTsAlterArgs(key, retentionTime, labels); + return ResponseParser.OKtoBoolean(await _db.ExecuteAsync(TS.ALTER, args)); + } + + /// + /// Append (or create and append) a new sample to the series. + /// + /// Key name for timeseries + /// TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock) + /// Numeric data value of the sample. + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// Optional: Adding this flag will keep data in an uncompressed form + /// Optional: Each time-series uses chunks of memory of fixed size for time series samples. 
+ /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes) + /// Optioal: overwrite key and database configuration for DUPLICATE_POLICY + /// The timestamp value of the new sample + public TimeStamp Add(string key, TimeStamp timestamp, double value, long? retentionTime = null, IReadOnlyCollection labels = null, bool? uncompressed = null, long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null) + { + var args = TimeSeriesAux.BuildTsAddArgs(key, timestamp, value, retentionTime, labels, uncompressed, chunkSizeBytes, duplicatePolicy); + return ResponseParser.ToTimeStamp(_db.Execute(TS.ADD, args)); + } + + /// + /// Append (or create and append) a new sample to the series. + /// + /// Key name for timeseries + /// TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock) + /// Numeric data value of the sample. + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// Optional: Adding this flag will keep data in an uncompressed form + /// Optional: Each time-series uses chunks of memory of fixed size for time series samples. + /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes) + /// Optioal: overwrite key and database configuration for DUPLICATE_POLICY + /// The timestamp value of the new sample + public async Task AddAsync(string key, TimeStamp timestamp, double value, long? retentionTime = null, IReadOnlyCollection labels = null, bool? uncompressed = null, long? chunkSizeBytes = null, TsDuplicatePolicy? duplicatePolicy = null) + { + var args = TimeSeriesAux.BuildTsAddArgs(key, timestamp, value, retentionTime, labels, uncompressed, chunkSizeBytes, duplicatePolicy); + return ResponseParser.ToTimeStamp(await _db.ExecuteAsync(TS.ADD, args)); + } + + /// + /// Append new samples to multiple series. + /// + /// An Collection of (key, timestamp, value) tuples + /// List of timestamps of the new samples + public IReadOnlyList MAdd(IReadOnlyCollection<(string key, TimeStamp timestamp, double value)> sequence) + { + var args = TimeSeriesAux.BuildTsMaddArgs(sequence); + return ResponseParser.ToTimeStampArray(_db.Execute(TS.MADD, args)); + } + + /// + /// Append new samples to multiple series. + /// + /// An Collection of (key, timestamp, value) tuples + /// List of timestamps of the new samples + public async Task> MAddAsync(IReadOnlyCollection<(string key, TimeStamp timestamp, double value)> sequence) + { + var args = TimeSeriesAux.BuildTsMaddArgs(sequence); + return ResponseParser.ToTimeStampArray(await _db.ExecuteAsync(TS.MADD, args)); + } + + /// + /// Creates a new sample that increments the latest sample's value. + /// + /// Key name for timeseries + /// Delta to add + /// Optional: TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock) + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// Optional: Adding this flag will keep data in an uncompressed form + /// Optional: Each time-series uses chunks of memory of fixed size for time series samples. + /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes) + /// The latests sample timestamp (updated sample) + public TimeStamp IncrBy(string key, double value, TimeStamp? 
timestamp = null, long? retentionTime = null, IReadOnlyCollection? labels = null, bool? uncompressed = null, long? chunkSizeBytes = null) + { + var args = TimeSeriesAux.BuildTsIncrDecrByArgs(key, value, timestamp, retentionTime, labels, uncompressed, chunkSizeBytes); + return ResponseParser.ToTimeStamp(_db.Execute(TS.INCRBY, args)); + } + + /// + /// Creates a new sample that increments the latest sample's value. + /// + /// Key name for timeseries + /// Delta to add + /// Optional: TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock) + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// Optional: Adding this flag will keep data in an uncompressed form + /// Optional: Each time-series uses chunks of memory of fixed size for time series samples. + /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes) + /// The latests sample timestamp (updated sample) + public async Task IncrByAsync(string key, double value, TimeStamp? timestamp = null, long? retentionTime = null, IReadOnlyCollection? labels = null, bool? uncompressed = null, long? chunkSizeBytes = null) + { + var args = TimeSeriesAux.BuildTsIncrDecrByArgs(key, value, timestamp, retentionTime, labels, uncompressed, chunkSizeBytes); + return ResponseParser.ToTimeStamp(await _db.ExecuteAsync(TS.INCRBY, args)); + } + + /// + /// Creates a new sample that decrements the latest sample's value. + /// + /// Key name for timeseries + /// Delta to substract + /// Optional: TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock) + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// Optional: Adding this flag will keep data in an uncompressed form + /// Optional: Each time-series uses chunks of memory of fixed size for time series samples. + /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes) + /// The latests sample timestamp (updated sample) + public TimeStamp DecrBy(string key, double value, TimeStamp? timestamp = null, long? retentionTime = null, IReadOnlyCollection? labels = null, bool? uncompressed = null, long? chunkSizeBytes = null) + { + var args = TimeSeriesAux.BuildTsIncrDecrByArgs(key, value, timestamp, retentionTime, labels, uncompressed, chunkSizeBytes); + return ResponseParser.ToTimeStamp(_db.Execute(TS.DECRBY, args)); + } + + /// + /// Creates a new sample that decrements the latest sample's value. + /// + /// Key name for timeseries + /// Delta to substract + /// Optional: TimeStamp to add. UNIX timestamp of the sample. * can be used for automatic timestamp (using the system clock) + /// Optional: Maximum age for samples compared to last event time (in milliseconds) + /// Optional: Collaction of label-value pairs that represent metadata labels of the key + /// Optional: Adding this flag will keep data in an uncompressed form + /// Optional: Each time-series uses chunks of memory of fixed size for time series samples. + /// You can alter the default TS_db chunk size by passing the chunk_size argument (in Bytes) + /// The latests sample timestamp (updated sample) + public async Task DecrByAsync(string key, double value, TimeStamp? timestamp = null, long? retentionTime = null, IReadOnlyCollection? 
labels = null, bool? uncompressed = null, long? chunkSizeBytes = null) + { + var args = TimeSeriesAux.BuildTsIncrDecrByArgs(key, value, timestamp, retentionTime, labels, uncompressed, chunkSizeBytes); + return ResponseParser.ToTimeStamp(await _db.ExecuteAsync(TS.DECRBY, args)); + } + + /// + /// Delete data points for a given timeseries and interval range in the form of start and end delete timestamps. + /// The given timestamp interval is closed (inclusive), meaning start and end data points will also be deleted. + /// + /// Key name for timeseries + /// Start timestamp for the range deletion. + /// End timestamp for the range deletion. + /// The count of deleted items + public long Del(string key, TimeStamp fromTimeStamp, TimeStamp toTimeStamp) + { + var args = TimeSeriesAux.BuildTsDelArgs(key, fromTimeStamp, toTimeStamp); + return ResponseParser.ToLong(_db.Execute(TS.DEL, args)); + } + + /// + /// Delete data points for a given timeseries and interval range in the form of start and end delete timestamps. + /// The given timestamp interval is closed (inclusive), meaning start and end data points will also be deleted. + /// + /// Key name for timeseries + /// Start timestamp for the range deletion. + /// End timestamp for the range deletion. + /// The count of deleted items + public async Task DelAsync(string key, TimeStamp fromTimeStamp, TimeStamp toTimeStamp) + { + var args = TimeSeriesAux.BuildTsDelArgs(key, fromTimeStamp, toTimeStamp); + return ResponseParser.ToLong(await _db.ExecuteAsync(TS.DEL, args)); + } + + #endregion + + #region Aggregation, Compaction, Downsampling + + /// + /// Create a compaction rule. + /// + /// Key name for source time series + /// TimeSeries rule: + /// Key name for destination time series, Aggregation type and Time bucket for aggregation in milliseconds + /// If the operation executed successfully + public bool CreateRule(string sourceKey, TimeSeriesRule rule) + { + var args = new List { sourceKey }; + args.AddRule(rule); + return ResponseParser.OKtoBoolean(_db.Execute(TS.CREATERULE, args)); + } + + /// + /// Create a compaction rule. + /// + /// Key name for source time series + /// TimeSeries rule: + /// Key name for destination time series, Aggregation type and Time bucket for aggregation in milliseconds + /// If the operation executed successfully + public async Task CreateRuleAsync(string sourceKey, TimeSeriesRule rule) + { + var args = new List { sourceKey }; + args.AddRule(rule); + return ResponseParser.OKtoBoolean(await _db.ExecuteAsync(TS.CREATERULE, args)); + } + + /// + /// Deletes a compaction rule. + /// + /// Key name for source time series + /// Key name for destination time series + /// If the operation executed successfully + public bool DeleteRule(string sourceKey, string destKey) + { + var args = new List { sourceKey, destKey }; + return ResponseParser.OKtoBoolean(_db.Execute(TS.DELETERULE, args)); + } + + /// + /// Deletes a compaction rule. + /// + /// Key name for source time series + /// Key name for destination time series + /// If the operation executed successfully + public async Task DeleteRuleAsync(string sourceKey, string destKey) + { + var args = new List { sourceKey, destKey }; + return ResponseParser.OKtoBoolean(await _db.ExecuteAsync(TS.DELETERULE, args)); + } + + #endregion + + #region Query + + /// + /// Get the last sample. + /// + /// Key name for timeseries + /// TimeSeriesTuple that represents the last sample. Null if the series is empty. + public TimeSeriesTuple? 
Get(string key) + { + return ResponseParser.ToTimeSeriesTuple(_db.Execute(TS.GET, key)); + } + + /// + /// Get the last sample. + /// + /// Key name for timeseries + /// TimeSeriesTuple that represents the last sample. Null if the series is empty. + public async Task GetAsync(string key) + { + return ResponseParser.ToTimeSeriesTuple(await _db.ExecuteAsync(TS.GET, key)); + } + + /// + /// Get the last samples matching the specific filter. + /// + /// A sequence of filters + /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series + /// The command returns the last sample for entries with labels matching the specified filter. + public IReadOnlyList<(string key, IReadOnlyList labels, TimeSeriesTuple value)> MGet(IReadOnlyCollection filter, bool? withLabels = null) + { + var args = TimeSeriesAux.BuildTsMgetArgs(filter, withLabels); + return ResponseParser.ParseMGetResponse(_db.Execute(TS.MGET, args)); + } + + /// + /// Get the last samples matching the specific filter. + /// + /// A sequence of filters + /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series + /// The command returns the last sample for entries with labels matching the specified filter. + public async Task labels, TimeSeriesTuple value)>> MGetAsync(IReadOnlyCollection filter, bool? withLabels = null) + { + var args = TimeSeriesAux.BuildTsMgetArgs(filter, withLabels); + return ResponseParser.ParseMGetResponse(await _db.ExecuteAsync(TS.MGET, args)); + } + + /// + /// Query a range. + /// + /// Key name for timeseries + /// Start timestamp for the range query. "-" can be used to express the minimum possible timestamp. + /// End timestamp for range query, + can be used to express the maximum possible timestamp. + /// Optional: Returned list size. + /// Optional: Aggregation type + /// Optional: Time bucket for aggregation in milliseconds + /// Optional: List of timestamps to filter the result by specific timestamps + /// Optional: Filter result by value using minimum and maximum + /// Optional: Timestamp for alignment control for aggregation. + /// A list of TimeSeriesTuple + public IReadOnlyList Range(string key, + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + long? count = null, + TsAggregation? aggregation = null, + long? timeBucket = null, + IReadOnlyCollection? filterByTs = null, + (long, long)? filterByValue = null, + TimeStamp? align = null) + { + var args = TimeSeriesAux.BuildRangeArgs(key, fromTimeStamp, toTimeStamp, count, aggregation, timeBucket, filterByTs, filterByValue, align); + return ResponseParser.ToTimeSeriesTupleArray(_db.Execute(TS.RANGE, args)); + } + + /// + /// Query a range. + /// + /// Key name for timeseries + /// Start timestamp for the range query. "-" can be used to express the minimum possible timestamp. + /// End timestamp for range query, + can be used to express the maximum possible timestamp. + /// Optional: Returned list size. + /// Optional: Aggregation type + /// Optional: Time bucket for aggregation in milliseconds + /// Optional: List of timestamps to filter the result by specific timestamps + /// Optional: Filter result by value using minimum and maximum + /// Optional: Timestamp for alignment control for aggregation. + /// A list of TimeSeriesTuple + public async Task> RangeAsync(string key, + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + long? count = null, + TsAggregation? aggregation = null, + long? timeBucket = null, + IReadOnlyCollection? filterByTs = null, + (long, long)? 
filterByValue = null, + TimeStamp? align = null) + { + var args = TimeSeriesAux.BuildRangeArgs(key, fromTimeStamp, toTimeStamp, count, aggregation, timeBucket, filterByTs, filterByValue, align); + return ResponseParser.ToTimeSeriesTupleArray(await _db.ExecuteAsync(TS.RANGE, args)); + } + + /// + /// Query a range in reverse order. + /// + /// Key name for timeseries + /// Start timestamp for the range query. "-" can be used to express the minimum possible timestamp. + /// End timestamp for range query, + can be used to express the maximum possible timestamp. + /// Optional: Returned list size. + /// Optional: Aggregation type + /// Optional: Time bucket for aggregation in milliseconds + /// Optional: List of timestamps to filter the result by specific timestamps + /// Optional: Filter result by value using minimum and maximum + /// Optional: Timestamp for alignment control for aggregation. + /// A list of TimeSeriesTuple + public IReadOnlyList RevRange( + string key, + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + long? count = null, + TsAggregation? aggregation = null, + long? timeBucket = null, + IReadOnlyCollection? filterByTs = null, + (long, long)? filterByValue = null, + TimeStamp? align = null) + { + var args = TimeSeriesAux.BuildRangeArgs(key, fromTimeStamp, toTimeStamp, count, aggregation, timeBucket, filterByTs, filterByValue, align); + return ResponseParser.ToTimeSeriesTupleArray(_db.Execute(TS.REVRANGE, args)); + } + + /// + /// Query a range in reverse order. + /// + /// Key name for timeseries + /// Start timestamp for the range query. "-" can be used to express the minimum possible timestamp. + /// End timestamp for range query, + can be used to express the maximum possible timestamp. + /// Optional: Returned list size. + /// Optional: Aggregation type + /// Optional: Time bucket for aggregation in milliseconds + /// Optional: List of timestamps to filter the result by specific timestamps + /// Optional: Filter result by value using minimum and maximum + /// Optional: Timestamp for alignment control for aggregation. + /// A list of TimeSeriesTuple + public async Task> RevRangeAsync( + string key, + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + long? count = null, + TsAggregation? aggregation = null, + long? timeBucket = null, + IReadOnlyCollection? filterByTs = null, + (long, long)? filterByValue = null, + TimeStamp? align = null) + { + var args = TimeSeriesAux.BuildRangeArgs(key, fromTimeStamp, toTimeStamp, count, aggregation, timeBucket, filterByTs, filterByValue, align); + return ResponseParser.ToTimeSeriesTupleArray(await _db.ExecuteAsync(TS.REVRANGE, args)); + } + + /// + /// Query a timestamp range across multiple time-series by filters. + /// + /// Start timestamp for the range query. - can be used to express the minimum possible timestamp. + /// End timestamp for range query, + can be used to express the maximum possible timestamp. + /// A sequence of filters + /// Optional: Maximum number of returned results per time-series. + /// Optional: Aggregation type + /// Optional: Time bucket for aggregation in milliseconds + /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series + /// Optional: Grouping by fields the results, and applying reducer functions on each group. + /// Optional: List of timestamps to filter the result by specific timestamps + /// Optional: Filter result by value using minimum and maximum + /// Optional: Include in the reply only a subset of the key-value pair labels of a series. 
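// --- Illustrative usage, not part of this diff ---
// A minimal sketch of the Range/RevRange overloads defined above, assuming an existing
// StackExchange.Redis IDatabase `db` and a populated series "sensor:1". The enum member
// TsAggregation.Avg and the one-hour bucket are assumptions of this example; the "-"/"+"
// range bounds come from the parameter documentation above.
//
//     // Raw samples, newest first, capped at 10:
//     var latest = db.TS().RevRange("sensor:1", "-", "+", count: 10);
//
//     // Hourly averages over the whole series (TS.RANGE ... AGGREGATION avg 3600000):
//     var hourly = db.TS().Range("sensor:1", "-", "+",
//                                aggregation: TsAggregation.Avg, timeBucket: 3_600_000);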
+ /// Optional: Timestamp for alignment control for aggregation. + /// A list of (key, labels, values) tuples. Each tuple contains the key name, its labels and the values which satisfies the given range and filters. + public IReadOnlyList<(string key, IReadOnlyList labels, IReadOnlyList values)> MRange( + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + IReadOnlyCollection filter, + long? count = null, + TsAggregation? aggregation = null, + long? timeBucket = null, + bool? withLabels = null, + (string, TsReduce)? groupbyTuple = null, + IReadOnlyCollection? filterByTs = null, + (long, long)? filterByValue = null, + IReadOnlyCollection? selectLabels = null, + TimeStamp? align = null) + { + var args = TimeSeriesAux.BuildMultiRangeArgs(fromTimeStamp, toTimeStamp, filter, count, aggregation, timeBucket, withLabels, groupbyTuple, filterByTs, filterByValue, selectLabels, align); + return ResponseParser.ParseMRangeResponse(_db.Execute(TS.MRANGE, args)); + } + + /// + /// Query a timestamp range across multiple time-series by filters. + /// + /// Start timestamp for the range query. - can be used to express the minimum possible timestamp. + /// End timestamp for range query, + can be used to express the maximum possible timestamp. + /// A sequence of filters + /// Optional: Maximum number of returned results per time-series. + /// Optional: Aggregation type + /// Optional: Time bucket for aggregation in milliseconds + /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series + /// Optional: Grouping by fields the results, and applying reducer functions on each group. + /// Optional: List of timestamps to filter the result by specific timestamps + /// Optional: Filter result by value using minimum and maximum + /// Optional: Include in the reply only a subset of the key-value pair labels of a series. + /// Optional: Timestamp for alignment control for aggregation. + /// A list of (key, labels, values) tuples. Each tuple contains the key name, its labels and the values which satisfies the given range and filters. + public async Task labels, IReadOnlyList values)>> MRangeAsync( + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + IReadOnlyCollection filter, + long? count = null, + TsAggregation? aggregation = null, + long? timeBucket = null, + bool? withLabels = null, + (string, TsReduce)? groupbyTuple = null, + IReadOnlyCollection? filterByTs = null, + (long, long)? filterByValue = null, + IReadOnlyCollection? selectLabels = null, + TimeStamp? align = null) + { + var args = TimeSeriesAux.BuildMultiRangeArgs(fromTimeStamp, toTimeStamp, filter, count, aggregation, timeBucket, withLabels, groupbyTuple, filterByTs, filterByValue, selectLabels, align); + return ResponseParser.ParseMRangeResponse(await _db.ExecuteAsync(TS.MRANGE, args)); + } + + /// + /// Query a timestamp range in reverse order across multiple time-series by filters. + /// + /// Start timestamp for the range query. - can be used to express the minimum possible timestamp. + /// End timestamp for range query, + can be used to express the maximum possible timestamp. + /// A sequence of filters + /// Optional: Maximum number of returned results per time-series. + /// Optional: Aggregation type + /// Optional: Time bucket for aggregation in milliseconds + /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series + /// Optional: Grouping by fields the results, and applying reducer functions on each group. 
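// --- Illustrative usage, not part of this diff ---
// A sketch of an MRange call with label filtering and grouping, assuming an IDatabase `db`.
// The filter expression "sensor_id=2" and the reducer TsReduce.Max follow the RedisTimeSeries
// filter/REDUCE conventions and are assumptions of this example, not values defined by the diff.
//
//     var byRegion = db.TS().MRange("-", "+",
//                                   new[] { "sensor_id=2" },
//                                   withLabels: true,
//                                   groupbyTuple: ("region", TsReduce.Max));
//     foreach (var (key, labels, values) in byRegion)
//     {
//         // key name, its labels, and the samples that satisfied the range and filters
//     }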
+ /// Optional: List of timestamps to filter the result by specific timestamps + /// Optional: Filter result by value using minimum and maximum + /// Optional: Include in the reply only a subset of the key-value pair labels of a series. + /// Optional: Timestamp for alignment control for aggregation. + /// A list of (key, labels, values) tuples. Each tuple contains the key name, its labels and the values which satisfies the given range and filters. + public IReadOnlyList<(string key, IReadOnlyList labels, IReadOnlyList values)> MRevRange( + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + IReadOnlyCollection filter, + long? count = null, + TsAggregation? aggregation = null, + long? timeBucket = null, + bool? withLabels = null, + (string, TsReduce)? groupbyTuple = null, + IReadOnlyCollection? filterByTs = null, + (long, long)? filterByValue = null, + IReadOnlyCollection? selectLabels = null, + TimeStamp? align = null) + { + var args = TimeSeriesAux.BuildMultiRangeArgs(fromTimeStamp, toTimeStamp, filter, count, aggregation, timeBucket, withLabels, groupbyTuple, filterByTs, filterByValue, selectLabels, align); + return ResponseParser.ParseMRangeResponse(_db.Execute(TS.MREVRANGE, args)); + } + + /// + /// Query a timestamp range in reverse order across multiple time-series by filters. + /// + /// Start timestamp for the range query. - can be used to express the minimum possible timestamp. + /// End timestamp for range query, + can be used to express the maximum possible timestamp. + /// A sequence of filters + /// Optional: Maximum number of returned results per time-series. + /// Optional: Aggregation type + /// Optional: Time bucket for aggregation in milliseconds + /// Optional: Include in the reply the label-value pairs that represent metadata labels of the time-series + /// Optional: Grouping by fields the results, and applying reducer functions on each group. + /// Optional: List of timestamps to filter the result by specific timestamps + /// Optional: Filter result by value using minimum and maximum + /// Optional: Include in the reply only a subset of the key-value pair labels of a series. + /// Optional: Timestamp for alignment control for aggregation. + /// A list of (key, labels, values) tuples. Each tuple contains the key name, its labels and the values which satisfies the given range and filters. + public async Task labels, IReadOnlyList values)>> MRevRangeAsync( + TimeStamp fromTimeStamp, + TimeStamp toTimeStamp, + IReadOnlyCollection filter, + long? count = null, + TsAggregation? aggregation = null, + long? timeBucket = null, + bool? withLabels = null, + (string, TsReduce)? groupbyTuple = null, + IReadOnlyCollection? filterByTs = null, + (long, long)? filterByValue = null, + IReadOnlyCollection? selectLabels = null, + TimeStamp? align = null) + { + var args = TimeSeriesAux.BuildMultiRangeArgs(fromTimeStamp, toTimeStamp, filter, count, aggregation, timeBucket, withLabels, groupbyTuple, filterByTs, filterByValue, selectLabels, align); + return ResponseParser.ParseMRangeResponse(await _db.ExecuteAsync(TS.MREVRANGE, args)); + } + + #endregion + + #region General + + /// + /// Returns the information for a specific time-series key. + /// + /// Key name for timeseries + /// TimeSeriesInformation for the specific key. + public TimeSeriesInformation Info(string key) + { + return ResponseParser.ToTimeSeriesInfo(_db.Execute(TS.INFO, key)); + } + + /// + /// Returns the information for a specific time-series key. 
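// --- Illustrative usage, not part of this diff ---
// A sketch of the General-region commands, assuming an IDatabase `db`. The property names
// RetentionTime and Labels are the ones exercised by the tests in this change; the key name
// and the "sensor=temp" filter expression are placeholders assumed by this example.
//
//     TimeSeriesInformation info = db.TS().Info("sensor:1");          // TS.INFO
//     var retention = info.RetentionTime;
//     var labels = info.Labels;
//
//     var matchingKeys = db.TS().QueryIndex(new[] { "sensor=temp" }); // TS.QUERYINDEX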
+ /// + /// Key name for timeseries + /// TimeSeriesInformation for the specific key. + public async Task InfoAsync(string key) + { + return ResponseParser.ToTimeSeriesInfo(await _db.ExecuteAsync(TS.INFO, key)); + } + + /// + /// Get all the keys matching the filter list. + /// + /// A sequence of filters + /// A list of keys with labels matching the filters. + public IReadOnlyList QueryIndex(IReadOnlyCollection filter) + { + var args = new List(filter); + return ResponseParser.ToStringArray(_db.Execute(TS.QUERYINDEX, args)); + } + + /// + /// Get all the keys matching the filter list. + /// + /// A sequence of filters + /// A list of keys with labels matching the filters. + public async Task> QueryIndexAsync(IReadOnlyCollection filter) + { + var args = new List(filter); + return ResponseParser.ToStringArray(await _db.ExecuteAsync(TS.QUERYINDEX, args)); + } + + #endregion + + + } diff --git a/tests/NRedisStack.Tests/AbstractNRedisStackTest.cs b/tests/NRedisStack.Tests/AbstractNRedisStackTest.cs index 095b4ac4..ae94fefa 100644 --- a/tests/NRedisStack.Tests/AbstractNRedisStackTest.cs +++ b/tests/NRedisStack.Tests/AbstractNRedisStackTest.cs @@ -1,4 +1,5 @@ +using NRedisStack.DataTypes; using StackExchange.Redis; using System; using System.Collections.Generic; @@ -33,6 +34,17 @@ protected internal string[] CreateKeyNames(int count, [CallerMemberName] string return newKeys; } + protected internal static List ReverseData(List data) + { + var tuples = new List(data.Count); + for (var i = data.Count - 1; i >= 0; i--) + { + tuples.Add(data[i]); + } + + return tuples; + } + public Task InitializeAsync() => Task.CompletedTask; public async Task DisposeAsync() diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAdd.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAdd.cs new file mode 100644 index 00000000..477b37ee --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAdd.cs @@ -0,0 +1,218 @@ +using System; +using System.Collections.Generic; +using System.Threading; +using StackExchange.Redis; +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using NRedisStack.Literals.Enums; +using Xunit; +using NRedisStack.Tests; +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestAdd : AbstractNRedisStackTest, IDisposable + { + private readonly string key = "ADD_TESTS"; + + public TestAdd(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + + [Fact] + public void TestAddNotExistingTimeSeries() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(now, info.FirstTimeStamp); + Assert.Equal(now, info.LastTimeStamp); + } + + [Fact] + public void TestAddExistingTimeSeries() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + db.TS().Create(key); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(now, info.FirstTimeStamp); + Assert.Equal(now, info.LastTimeStamp); + } + + [Fact] + public void TestAddStar() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + db.TS().Add(key, "*", 1.1); + TimeSeriesInformation info = db.TS().Info(key); + Assert.True(info.FirstTimeStamp > 0); + Assert.Equal(info.FirstTimeStamp, info.LastTimeStamp); + } + + 
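        // Illustrative sketch, not part of this change: the tests in this class lean on
        // TimeStamp's implicit conversions — from DateTime (as in TestAddNotExistingTimeSeries)
        // and from the string "*" (as in TestAddStar, meaning "let the server pick the time").
        // The helper below just restates those two call shapes in one place; the key name and
        // sample values are placeholders.
        private static void AddSamplesSketch(IDatabase db)
        {
            TimeStamp explicitTime = DateTime.UtcNow;          // client-supplied UNIX ms timestamp
            db.TS().Add("sketch:series", explicitTime, 1.1);   // TS.ADD with an explicit timestamp
            db.TS().Add("sketch:series", "*", 2.2);            // TS.ADD with a server-assigned timestamp
        }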
[Fact] + public void TestAddWithRetentionTime() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + long retentionTime = 5000; + Assert.Equal(now, db.TS().Add(key, now, 1.1, retentionTime: retentionTime)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(now, info.FirstTimeStamp); + Assert.Equal(now, info.LastTimeStamp); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public void TestAddWithLabels() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + TimeSeriesLabel label = new TimeSeriesLabel("key", "value"); + var labels = new List { label }; + Assert.Equal(now, db.TS().Add(key, now, 1.1, labels: labels)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(now, info.FirstTimeStamp); + Assert.Equal(now, info.LastTimeStamp); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public void TestAddWithUncompressed() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + db.TS().Create(key); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1, uncompressed: true)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(now, info.FirstTimeStamp); + Assert.Equal(now, info.LastTimeStamp); + } + + [Fact] + public void TestAddWithChunkSize() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1, chunkSizeBytes: 128)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(now, info.FirstTimeStamp); + Assert.Equal(now, info.LastTimeStamp); + Assert.Equal(128, info.ChunkSize); + } + + [Fact] + public void TestAddWithDuplicatePolicyBlock() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1)); + Assert.Throws(() => db.TS().Add(key, now, 1.2)); + } + + [Fact] + public void TestAddWithDuplicatePolicyMin() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1)); + + // Insert a bigger number and check that it did not change the value. + Assert.Equal(now, db.TS().Add(key, now, 1.2, duplicatePolicy: TsDuplicatePolicy.MIN)); + Assert.Equal(1.1, db.TS().Range(key, now, now)[0].Val); + // Insert a smaller number and check that it changed. + Assert.Equal(now, db.TS().Add(key, now, 1.0, duplicatePolicy: TsDuplicatePolicy.MIN)); + Assert.Equal(1.0, db.TS().Range(key, now, now)[0].Val); + } + + [Fact] + public void TestAddWithDuplicatePolicyMax() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1)); + + // Insert a smaller number and check that it did not change the value. + Assert.Equal(now, db.TS().Add(key, now, 1.0, duplicatePolicy: TsDuplicatePolicy.MAX)); + Assert.Equal(1.1, db.TS().Range(key, now, now)[0].Val); + // Insert a bigger number and check that it changed. 
+ Assert.Equal(now, db.TS().Add(key, now, 1.2, duplicatePolicy: TsDuplicatePolicy.MAX)); + Assert.Equal(1.2, db.TS().Range(key, now, now)[0].Val); + } + + [Fact] + public void TestAddWithDuplicatePolicySum() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1)); + Assert.Equal(now, db.TS().Add(key, now, 1.0, duplicatePolicy: TsDuplicatePolicy.SUM)); + Assert.Equal(2.1, db.TS().Range(key, now, now)[0].Val); + } + + [Fact] + public void TestAddWithDuplicatePolicyFirst() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1)); + Assert.Equal(now, db.TS().Add(key, now, 1.0, duplicatePolicy: TsDuplicatePolicy.FIRST)); + Assert.Equal(1.1, db.TS().Range(key, now, now)[0].Val); + } + + [Fact] + public void TestAddWithDuplicatePolicyLast() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp now = DateTime.UtcNow; + Assert.Equal(now, db.TS().Add(key, now, 1.1)); + Assert.Equal(now, db.TS().Add(key, now, 1.0, duplicatePolicy: TsDuplicatePolicy.LAST)); + Assert.Equal(1.0, db.TS().Range(key, now, now)[0].Val); + } + + [Fact] + public void TestOldAdd() + { + TimeStamp old_dt = DateTime.UtcNow; + Thread.Sleep(1000); + TimeStamp new_dt = DateTime.UtcNow; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + db.TS().Create(key); + db.TS().Add(key, new_dt, 1.1); + // Adding old event + Assert.Equal(old_dt, db.TS().Add(key, old_dt, 1.1)); + } + + [Fact] + public void TestWrongParameters() + { + double value = 1.1; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = Assert.Throws(() => db.TS().Add(key, "+", value)); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + ex = Assert.Throws(() => db.TS().Add(key, "-", value)); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAddAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAddAsync.cs new file mode 100644 index 00000000..ce5bd07b --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAddAsync.cs @@ -0,0 +1,237 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using StackExchange.Redis; +using NRedisStack.DataTypes; +using NRedisStack.Literals.Enums; +using NRedisStack.RedisStackCommands; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestAddAsync : AbstractNRedisStackTest + { + public TestAddAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestAddNotExistingTimeSeries() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(timeStamp, info.FirstTimeStamp); + Assert.Equal(timeStamp, info.LastTimeStamp); + } + + [Fact] + public async Task TestAddExistingTimeSeries() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().CreateAsync(key); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(timeStamp, info.FirstTimeStamp); + 
Assert.Equal(timeStamp, info.LastTimeStamp); + } + + [Fact] + public async Task TestAddStar() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().AddAsync(key, "*", 1.1); + var info = await db.TS().InfoAsync(key); + Assert.True(info.FirstTimeStamp > 0); + Assert.Equal(info.FirstTimeStamp, info.LastTimeStamp); + } + + [Fact] + public async Task TestAddWithRetentionTime() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + long retentionTime = 5000; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1, retentionTime: retentionTime)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(timeStamp, info.FirstTimeStamp); + Assert.Equal(timeStamp, info.LastTimeStamp); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public async Task TestAddWithLabels() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + var label = new TimeSeriesLabel("key", "value"); + var labels = new List { label }; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1, labels: labels)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(timeStamp, info.FirstTimeStamp); + Assert.Equal(timeStamp, info.LastTimeStamp); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public async Task TestAddWithChunkSize() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1, chunkSizeBytes: 128)); + var info = await db.TS().InfoAsync(key); + Assert.Equal(timeStamp, info.FirstTimeStamp); + Assert.Equal(timeStamp, info.LastTimeStamp); + Assert.Equal(128, info.ChunkSize); + } + + [Fact] + public async Task TestAddWithUncompressed() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().CreateAsync(key); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1, uncompressed: true)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(timeStamp, info.FirstTimeStamp); + Assert.Equal(timeStamp, info.LastTimeStamp); + } + + [Fact] + public async Task TestAddWithDuplicatePolicyBlock() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1)); + await Assert.ThrowsAsync(async () => await db.TS().AddAsync(key, timeStamp, 1.2)); + } + + [Fact] + public async Task TestAddWithDuplicatePolicyMin() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1)); + + // Insert a bigger number and check that it did not change the value. + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.2, duplicatePolicy: TsDuplicatePolicy.MIN)); + IReadOnlyList results = await db.TS().RangeAsync(key, timeStamp, timeStamp); + Assert.Equal(1.1, results[0].Val); + + // Insert a smaller number and check that it changed. 
+ Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.0, duplicatePolicy: TsDuplicatePolicy.MIN)); + results = await db.TS().RangeAsync(key, timeStamp, timeStamp); + Assert.Equal(1.0, results[0].Val); + } + + [Fact] + public async Task TestAddWithDuplicatePolicyMax() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1)); + + // Insert a smaller number and check that it did not change the value. + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.0, duplicatePolicy: TsDuplicatePolicy.MAX)); + IReadOnlyList results = await db.TS().RangeAsync(key, timeStamp, timeStamp); + Assert.Equal(1.1, results[0].Val); + // Insert a bigger number and check that it changed. + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.2, duplicatePolicy: TsDuplicatePolicy.MAX)); + results = await db.TS().RangeAsync(key, timeStamp, timeStamp); + Assert.Equal(1.2, results[0].Val); + } + + [Fact] + public async Task TestAddWithDuplicatePolicySum() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1)); + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.0, duplicatePolicy: TsDuplicatePolicy.SUM)); + IReadOnlyList results = await db.TS().RangeAsync(key, timeStamp, timeStamp); + Assert.Equal(2.1, results[0].Val); + } + + [Fact] + public async Task TestAddWithDuplicatePolicyFirst() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1)); + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.0, duplicatePolicy: TsDuplicatePolicy.FIRST)); + IReadOnlyList results = await db.TS().RangeAsync(key, timeStamp, timeStamp); + Assert.Equal(1.1, results[0].Val); + } + + [Fact] + public async Task TestAddWithDuplicatePolicyLast() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.1)); + Assert.Equal(timeStamp, await db.TS().AddAsync(key, timeStamp, 1.0, duplicatePolicy: TsDuplicatePolicy.LAST)); + IReadOnlyList results = await db.TS().RangeAsync(key, timeStamp, timeStamp); + Assert.Equal(1.0, results[0].Val); + } + + [Fact] + public async Task TestOldAdd() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var dateTime = DateTime.UtcNow; + TimeStamp oldTimeStamp = dateTime.AddSeconds(-1); + TimeStamp newTimeStamp = dateTime; + await db.TS().CreateAsync(key); + await db.TS().AddAsync(key, newTimeStamp, 1.1); + // Adding old event + Assert.Equal(oldTimeStamp, await db.TS().AddAsync(key, oldTimeStamp, 1.1)); + } + + [Fact] + public async Task TestWrongParameters() + { + var key = CreateKeyName(); + var value = 1.1; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = await Assert.ThrowsAsync(async () => await db.TS().AddAsync(key, "+", value)); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + + ex = await Assert.ThrowsAsync(async () => await db.TS().AddAsync(key, "-", value)); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + } + } 
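    // Illustrative sketch, not part of this change: the CreateRule/DeleteRule methods added in
    // TimeSeriesCommands.cs are not covered by the tests in this file set. The TimeSeriesRule
    // constructor shape below (destination key, bucket in milliseconds, aggregation type) and the
    // member TsAggregation.Avg are assumptions of this example, inferred from the rule's
    // documentation in the diff rather than defined by it.
    internal static class CompactionRuleSketch
    {
        internal static void DownsampleToOneMinuteAverages(IDatabase db)
        {
            db.TS().Create("temp:raw");
            db.TS().Create("temp:avg:1m");

            var rule = new TimeSeriesRule("temp:avg:1m", 60_000, TsAggregation.Avg); // assumed ctor order
            db.TS().CreateRule("temp:raw", rule);              // TS.CREATERULE source -> destination
            db.TS().DeleteRule("temp:raw", "temp:avg:1m");     // TS.DELETERULE tears the rule down again
        }
    }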
+} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAlter.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAlter.cs new file mode 100644 index 00000000..4b09aa7f --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAlter.cs @@ -0,0 +1,51 @@ +using System; +using System.Collections.Generic; +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestAlter : AbstractNRedisStackTest, IDisposable + { + private readonly string key = "ALTER_TESTS"; + + public TestAlter(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + + [Fact] + public void TestAlterRetentionTime() + { + long retentionTime = 5000; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + db.TS().Create(key); + Assert.True(db.TS().Alter(key, retentionTime: retentionTime)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public void TestAlterLabels() + { + TimeSeriesLabel label = new TimeSeriesLabel("key", "value"); + var labels = new List { label }; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + db.TS().Create(key); + Assert.True(db.TS().Alter(key, labels: labels)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(labels, info.Labels); + labels.Clear(); + Assert.True(db.TS().Alter(key, labels: labels)); + info = db.TS().Info(key); + Assert.Equal(labels, info.Labels); + } + + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAlterAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAlterAsync.cs new file mode 100644 index 00000000..6d3966d9 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestAlterAsync.cs @@ -0,0 +1,49 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestAlterAsync : AbstractNRedisStackTest + { + public TestAlterAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestAlterRetentionTime() + { + var key = CreateKeyName(); + long retentionTime = 5000; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().CreateAsync(key); + Assert.True(await db.TS().AlterAsync(key, retentionTime: retentionTime)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public async Task TestAlterLabels() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel("key", "value"); + var labels = new List { label }; + await db.TS().CreateAsync(key); + Assert.True(await db.TS().AlterAsync(key, labels: labels)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(labels, info.Labels); + + labels.Clear(); + Assert.True(await db.TS().AlterAsync(key, labels: labels)); + + info = await db.TS().InfoAsync(key); + Assert.Equal(labels, info.Labels); + } + + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestCreate.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestCreate.cs new file mode 100644 index 00000000..e3f6e29e --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestCreate.cs @@ -0,0 +1,114 @@ +using System; +using 
System.Collections.Generic; +using StackExchange.Redis; +using NRedisTimeSeries; +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using NRedisStack.Literals.Enums; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestCreate : AbstractNRedisStackTest, IDisposable + { + private readonly string key = "CREATE_TESTS"; + + public TestCreate(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + + [Fact] + public void TestCreateOK() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key)); + TimeSeriesInformation info = db.TS().Info(key); + } + + [Fact] + public void TestCreateRetentionTime() + { + long retentionTime = 5000; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, retentionTime: retentionTime)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public void TestCreateLabels() + { + TimeSeriesLabel label = new TimeSeriesLabel("key", "value"); + var labels = new List { label }; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, labels: labels)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public void TestCreateEmptyLabels() + { + var labels = new List(); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, labels: labels)); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public void TestCreateUncompressed() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, uncompressed: true)); + } + + [Fact] + public void TestCreatehDuplicatePolicyFirst() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, duplicatePolicy: TsDuplicatePolicy.FIRST)); + } + + [Fact] + public void TestCreatehDuplicatePolicyLast() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, duplicatePolicy: TsDuplicatePolicy.LAST)); + } + + [Fact] + public void TestCreatehDuplicatePolicyMin() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, duplicatePolicy: TsDuplicatePolicy.MIN)); + } + + [Fact] + public void TestCreatehDuplicatePolicyMax() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, duplicatePolicy: TsDuplicatePolicy.MAX)); + } + + [Fact] + public void TestCreatehDuplicatePolicySum() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().Create(key, duplicatePolicy: TsDuplicatePolicy.SUM)); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestCreateAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestCreateAsync.cs new file mode 100644 index 00000000..92725f17 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestCreateAsync.cs @@ -0,0 +1,117 @@ +using NRedisStack.DataTypes; +using NRedisStack.Literals.Enums; +using NRedisStack.RedisStackCommands; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + 
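    // Illustrative sketch, not part of this change: reading data back with the query methods
    // added in TimeSeriesCommands.cs. IDatabase is fully qualified because this file does not
    // import StackExchange.Redis; the key names and the "sensor=temp" filter expression are
    // placeholders assumed by this example.
    internal static class ReadBackSketch
    {
        internal static void ReadBack(StackExchange.Redis.IDatabase db)
        {
            var last = db.TS().Get("sensor:1");                           // TS.GET — null when the series is empty
            var everything = db.TS().Range("sensor:1", "-", "+");         // TS.RANGE over the full series
            var latestPerSeries = db.TS().MGet(new[] { "sensor=temp" },   // TS.MGET across matching series
                                               withLabels: true);
        }
    }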
public class TestCreateAsync : AbstractNRedisStackTest + { + public TestCreateAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestCreateOK() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key)); + } + + [Fact] + public async Task TestCreateRetentionTime() + { + var key = CreateKeyName(); + long retentionTime = 5000; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, retentionTime: retentionTime)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public async Task TestCreateLabels() + { + var key = CreateKeyName(); + var label = new TimeSeriesLabel("key", "value"); + var labels = new List<TimeSeriesLabel> { label }; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, labels: labels)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public async Task TestCreateEmptyLabels() + { + var key = CreateKeyName(); + var labels = new List<TimeSeriesLabel>(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, labels: labels)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public async Task TestCreateUncompressed() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, uncompressed: true)); + } + + [Fact] + public async Task TestCreateDuplicatePolicyFirst() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, duplicatePolicy: TsDuplicatePolicy.FIRST)); + } + + [Fact] + public async Task TestCreateDuplicatePolicyLast() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, duplicatePolicy: TsDuplicatePolicy.LAST)); + } + + [Fact] + public async Task TestCreateDuplicatePolicyMin() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, duplicatePolicy: TsDuplicatePolicy.MIN)); + } + + [Fact] + public async Task TestCreateDuplicatePolicyMax() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, duplicatePolicy: TsDuplicatePolicy.MAX)); + } + + [Fact] + public async Task TestCreateDuplicatePolicySum() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().CreateAsync(key, duplicatePolicy: TsDuplicatePolicy.SUM)); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDecrBy.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDecrBy.cs new file mode 100644 index 00000000..a7fe22ff --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDecrBy.cs @@ -0,0 +1,101 @@ +using System; +using System.Collections.Generic; +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestDecrBy : AbstractNRedisStackTest, IDisposable + { + private readonly string key = 
"DECRBY_TESTS"; + + public TestDecrBy(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + + [Fact] + public void TestDefaultDecrBy() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().DecrBy(key, -value) > 0); + Assert.Equal(value, db.TS().Get(key).Val); + } + + [Fact] + public void TestStarDecrBy() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().DecrBy(key, -value, timestamp: "*") > 0); + Assert.Equal(value, db.TS().Get(key).Val); + } + + [Fact] + public void TestDecrByTimeStamp() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, db.TS().DecrBy(key, -value, timestamp: timeStamp)); + Assert.Equal(new TimeSeriesTuple(timeStamp, value), db.TS().Get(key)); + } + + [Fact] + public void TestDefaultDecrByWithRetentionTime() + { + double value = 5.5; + long retentionTime = 5000; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().DecrBy(key, -value, retentionTime: retentionTime) > 0); + Assert.Equal(value, db.TS().Get(key).Val); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public void TestDefaultDecrByWithLabels() + { + double value = 5.5; + TimeSeriesLabel label = new TimeSeriesLabel("key", "value"); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var labels = new List { label }; + Assert.True(db.TS().DecrBy(key, -value, labels: labels) > 0); + Assert.Equal(value, db.TS().Get(key).Val); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public void TestDefaultDecrByWithUncompressed() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().DecrBy(key, -value, uncompressed: true) > 0); + Assert.Equal(value, db.TS().Get(key).Val); + } + + [Fact] + public void TestWrongParameters() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = Assert.Throws(() => db.TS().DecrBy(key, value, timestamp: "+")); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + ex = Assert.Throws(() => db.TS().DecrBy(key, value, timestamp: "-")); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDecrByAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDecrByAsync.cs new file mode 100644 index 00000000..34ef7180 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDecrByAsync.cs @@ -0,0 +1,115 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestDecrByAsync : AbstractNRedisStackTest + { + public TestDecrByAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestDefaultDecrBy() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().DecrByAsync(key, -value) > 0); + + var result = await 
db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + } + + [Fact] + public async Task TestStarDecrBy() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().DecrByAsync(key, -value, timestamp: "*") > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + } + + [Fact] + public async Task TestDecrByTimeStamp() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().DecrByAsync(key, -value, timestamp: timeStamp)); + Assert.Equal(new TimeSeriesTuple(timeStamp, value), await db.TS().GetAsync(key)); + } + + [Fact] + public async Task TestDefaultDecrByWithRetentionTime() + { + var key = CreateKeyName(); + var value = 5.5; + long retentionTime = 5000; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().DecrByAsync(key, -value, retentionTime: retentionTime) > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public async Task TestDefaultDecrByWithLabels() + { + var key = CreateKeyName(); + var value = 5.5; + var label = new TimeSeriesLabel("key", "value"); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var labels = new List { label }; + Assert.True(await db.TS().DecrByAsync(key, -value, labels: labels) > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public async Task TestDefaultDecrByWithUncompressed() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().DecrByAsync(key, -value, uncompressed: true) > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + } + + [Fact] + public async Task TestWrongParameters() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = await Assert.ThrowsAsync(async () => await db.TS().DecrByAsync(key, value, timestamp: "+")); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + + ex = await Assert.ThrowsAsync(async () => await db.TS().DecrByAsync(key, value, timestamp: "-")); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDel.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDel.cs new file mode 100644 index 00000000..fff0bcec --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDel.cs @@ -0,0 +1,58 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using System; +using System.Collections.Generic; +using Xunit; + + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestDel : AbstractNRedisStackTest, IDisposable + { + private readonly string key = "DEL_TESTS"; + + public TestDel(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + + private List CreateData(IDatabase db, int timeBucket) + { + var tuples = new List(); + for (int i = 0; i < 10; i++) + { + TimeStamp ts = db.TS().Add(key, 
i * timeBucket, i); + tuples.Add(new TimeSeriesTuple(ts, i)); + } + return tuples; + } + + [Fact] + public void TestDelNotExists() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = Assert.Throws(() => db.TS().Del(key, "-", "+")); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + } + + [Fact] + public void TestDelRange() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = CreateData(db, 50); + TimeStamp from = tuples[0].Time; + TimeStamp to = tuples[5].Time; + Assert.Equal(6, db.TS().Del(key, from, to)); + + // check that the operation deleted the timestamps + IReadOnlyList res = db.TS().Range(key, from, to); + Assert.Equal(0, res.Count); + Assert.NotNull(db.TS().Get(key)); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDelAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDelAsync.cs new file mode 100644 index 00000000..19957736 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestDelAsync.cs @@ -0,0 +1,52 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestDelAsync : AbstractNRedisStackTest + { + public TestDelAsync(RedisFixture redisFixture) : base(redisFixture) { } + + private async Task> CreateData(IDatabase db, string key, int timeBucket) + { + var tuples = new List(); + for (var i = 0; i < 10; i++) + { + var ts = await db.TS().AddAsync(key, i * timeBucket, i); + tuples.Add(new TimeSeriesTuple(ts, i)); + } + return tuples; + } + + [Fact] + public async Task TestDelNotExists() + { + var key = CreateKeyName(); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = await Assert.ThrowsAsync(async () => await db.TS().DelAsync(key, "-", "+")); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + } + + [Fact] + public async Task TestDelRange() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var key = CreateKeyName(); + var tuples = await CreateData(db, key, 50); + TimeStamp from = tuples[0].Time; + TimeStamp to = tuples[5].Time; + Assert.Equal(6, await db.TS().DelAsync(key, from, to)); + + // check that the operation deleted the timestamps + IReadOnlyList res = await db.TS().RangeAsync(key, from, to); + Assert.Equal(0, res.Count); + Assert.NotNull(await db.TS().GetAsync(key)); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestGet.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestGet.cs new file mode 100644 index 00000000..1533e794 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestGet.cs @@ -0,0 +1,52 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using System; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestGet : AbstractNRedisStackTest, IDisposable + { + + private readonly string key = "GET_TESTS"; + + public TestGet(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + + [Fact] + public void TestGetNotExists() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = Assert.Throws(() => db.TS().Get(key)); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + } + + [Fact] + public void TestEmptyGet() + { + 
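// TS.GET on a series that has no samples yet is expected to return an empty result, which the client surfaces as null (asserted below). +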
IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + db.TS().Create(key); + Assert.Null(db.TS().Get(key)); + } + + [Fact] + public void TestAddAndGet() + { + DateTime now = DateTime.UtcNow; + TimeSeriesTuple expected = new TimeSeriesTuple(now, 1.1); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + db.TS().Create(key); + db.TS().Add(key, now, 1.1); + TimeSeriesTuple actual = db.TS().Get(key); + Assert.Equal(expected, actual); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestGetAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestGetAsync.cs new file mode 100644 index 00000000..bf33fcd5 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestGetAsync.cs @@ -0,0 +1,48 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using System; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestGetAsync : AbstractNRedisStackTest + { + public TestGetAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestGetNotExists() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = await Assert.ThrowsAsync(async () => await db.TS().GetAsync(key)); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + } + + [Fact] + public async Task TestEmptyGet() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().CreateAsync(key); + Assert.Null(await db.TS().GetAsync(key)); + } + + [Fact] + public async Task TestAddAndGet() + { + var key = CreateKeyName(); + var now = DateTime.UtcNow; + var expected = new TimeSeriesTuple(now, 1.1); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().CreateAsync(key); + await db.TS().AddAsync(key, now, 1.1); + var actual = await db.TS().GetAsync(key); + Assert.Equal(expected, actual); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestIncrBy.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestIncrBy.cs new file mode 100644 index 00000000..9673be58 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestIncrBy.cs @@ -0,0 +1,101 @@ +using System; +using System.Collections.Generic; +using NRedisStack.RedisStackCommands; +using NRedisStack.DataTypes; +using StackExchange.Redis; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestIncrBy : AbstractNRedisStackTest, IDisposable + { + private readonly string key = "INCRBY_TESTS"; + + public TestIncrBy(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + + [Fact] + public void TestDefaultIncrBy() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().IncrBy(key, value) > 0); + Assert.Equal(value, db.TS().Get(key).Val); + } + + [Fact] + public void TestStarIncrBy() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().IncrBy(key, value, timestamp: "*") > 0); + Assert.Equal(value, db.TS().Get(key).Val); + } + + [Fact] + public void TestIncrByTimeStamp() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, db.TS().IncrBy(key, value, timestamp: 
timeStamp)); + Assert.Equal(new TimeSeriesTuple(timeStamp, value), db.TS().Get(key)); + } + + [Fact] + public void TestDefaultIncrByWithRetentionTime() + { + double value = 5.5; + long retentionTime = 5000; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().IncrBy(key, value, retentionTime: retentionTime) > 0); + Assert.Equal(value, db.TS().Get(key).Val); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public void TestDefaultIncrByWithLabels() + { + double value = 5.5; + TimeSeriesLabel label = new TimeSeriesLabel("key", "value"); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var labels = new List { label }; + Assert.True(db.TS().IncrBy(key, value, labels: labels) > 0); + Assert.Equal(value, db.TS().Get(key).Val); + TimeSeriesInformation info = db.TS().Info(key); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public void TestDefaultIncrByWithUncompressed() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(db.TS().IncrBy(key, value, uncompressed: true) > 0); + Assert.Equal(value, db.TS().Get(key).Val); + } + + [Fact] + public void TestWrongParameters() + { + double value = 5.5; + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = Assert.Throws(() => db.TS().IncrBy(key, value, timestamp: "+")); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + ex = Assert.Throws(() => db.TS().IncrBy(key, value, timestamp: "-")); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestIncrByAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestIncrByAsync.cs new file mode 100644 index 00000000..5bf80603 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestIncrByAsync.cs @@ -0,0 +1,115 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestIncrByAsync : AbstractNRedisStackTest + { + public TestIncrByAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestDefaultIncrBy() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().IncrByAsync(key, value) > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + } + + [Fact] + public async Task TestStarIncrBy() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().IncrByAsync(key, value, timestamp: "*") > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + } + + [Fact] + public async Task TestIncrByTimeStamp() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeStamp timeStamp = DateTime.UtcNow; + Assert.Equal(timeStamp, await db.TS().IncrByAsync(key, value, timestamp: timeStamp)); + Assert.Equal(new TimeSeriesTuple(timeStamp, value), await db.TS().GetAsync(key)); + } + + [Fact] + public async Task TestDefaultIncrByWithRetentionTime() + { + var key = CreateKeyName(); + var value = 5.5; + long retentionTime = 5000; + var db = 
redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().IncrByAsync(key, value, retentionTime: retentionTime) > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(retentionTime, info.RetentionTime); + } + + [Fact] + public async Task TestDefaultIncrByWithLabels() + { + var key = CreateKeyName(); + var value = 5.5; + var label = new TimeSeriesLabel("key", "value"); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var labels = new List { label }; + Assert.True(await db.TS().IncrByAsync(key, value, labels: labels) > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(labels, info.Labels); + } + + [Fact] + public async Task TestDefaultIncrByWithUncompressed() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + Assert.True(await db.TS().IncrByAsync(key, value, uncompressed: true) > 0); + + var result = await db.TS().GetAsync(key); + Assert.Equal(value, result.Val); + } + + [Fact] + public async Task TestWrongParameters() + { + var key = CreateKeyName(); + var value = 5.5; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var ex = await Assert.ThrowsAsync(async () => await db.TS().IncrByAsync(key, value, timestamp: "+")); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + + ex = await Assert.ThrowsAsync(async () => await db.TS().IncrByAsync(key, value, timestamp: "-")); + Assert.Equal("ERR TSDB: invalid timestamp", ex.Message); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMADD.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMADD.cs new file mode 100644 index 00000000..a35268ca --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMADD.cs @@ -0,0 +1,122 @@ +using System; +using System.Collections.Generic; +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestMADD : AbstractNRedisStackTest, IDisposable + { + + private readonly string[] keys = { "MADD_TESTS_1", "MADD_TESTS_2" }; + + public TestMADD(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + foreach (string key in keys) + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + } + + [Fact] + public void TestStarMADD() + { + + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + foreach (string key in keys) + { + db.TS().Create(key); + } + List<(string, TimeStamp, double)> sequence = new List<(string, TimeStamp, double)>(keys.Length); + foreach (var keyname in keys) + { + sequence.Add((keyname, "*", 1.1)); + } + var response = db.TS().MAdd(sequence); + + Assert.Equal(keys.Length, response.Count); + + foreach (var key in keys) + { + TimeSeriesInformation info = db.TS().Info(key); + Assert.True(info.FirstTimeStamp > 0); + Assert.Equal(info.FirstTimeStamp, info.LastTimeStamp); + } + } + + [Fact] + public void TestSuccessfulMADD() + { + + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + foreach (string key in keys) + { + db.TS().Create(key); + } + + List<(string, TimeStamp, double)> sequence = new List<(string, TimeStamp, double)>(keys.Length); + List timestamps = new List(keys.Length); + foreach (var keyname in keys) + { + DateTime now = 
DateTime.UtcNow; + timestamps.Add(now); + sequence.Add((keyname, now, 1.1)); + } + var response = db.TS().MAdd(sequence); + + Assert.Equal(timestamps.Count, response.Count); + for (int i = 0; i < response.Count; i++) + { + Assert.Equal(timestamps[i], response[i]); + } + } + + [Fact] + public void TestOverrideMADD() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + foreach (string key in keys) + { + db.TS().Create(key); + } + + List oldTimeStamps = new List(); + foreach (var keyname in keys) + { + oldTimeStamps.Add(DateTime.UtcNow); + } + + List<(string, TimeStamp, double)> sequence = new List<(string, TimeStamp, double)>(keys.Length); + foreach (var keyname in keys) + { + sequence.Add((keyname, DateTime.UtcNow, 1.1)); + } + db.TS().MAdd(sequence); + + sequence.Clear(); + + // Override the same events should not throw an error + for (int i = 0; i < keys.Length; i++) + { + sequence.Add((keys[i], oldTimeStamps[i], 1.1)); + } + var response = db.TS().MAdd(sequence); + + Assert.Equal(oldTimeStamps.Count, response.Count); + for (int i = 0; i < response.Count; i++) + { + Assert.Equal(oldTimeStamps[i], response[i]); + } + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMAddAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMAddAsync.cs new file mode 100644 index 00000000..6932ee99 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMAddAsync.cs @@ -0,0 +1,118 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestMAddAsync : AbstractNRedisStackTest + { + public TestMAddAsync(RedisFixture redisFixture) : base(redisFixture) { } + + + [Fact] + public async Task TestStarMADD() + { + var keys = CreateKeyNames(2); + + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + foreach (string key in keys) + { + await db.TS().CreateAsync(key); + } + + List<(string, TimeStamp, double)> sequence = new List<(string, TimeStamp, double)>(keys.Length); + foreach (var keyname in keys) + { + sequence.Add((keyname, "*", 1.1)); + } + var response = await db.TS().MAddAsync(sequence); + + Assert.Equal(keys.Length, response.Count); + + foreach (var key in keys) + { + TimeSeriesInformation info = await db.TS().InfoAsync(key); + Assert.True(info.FirstTimeStamp > 0); + Assert.Equal(info.FirstTimeStamp, info.LastTimeStamp); + } + } + + + [Fact] + public async Task TestSuccessfulMAdd() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + foreach (var key in keys) + { + await db.TS().CreateAsync(key); + } + + var sequence = new List<(string, TimeStamp, double)>(keys.Length); + var timestamps = new List(keys.Length); + foreach (var keyname in keys) + { + var now = DateTime.UtcNow; + timestamps.Add(now); + sequence.Add((keyname, now, 1.1)); + } + + var response = await db.TS().MAddAsync(sequence); + Assert.Equal(timestamps.Count, response.Count); + for (var i = 0; i < response.Count; i++) + { + Assert.Equal(timestamps[i], response[i]); + } + } + + [Fact] + public async Task TestOverrideMAdd() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + foreach (var key in keys) + { + await db.TS().CreateAsync(key); + } + + var oldTimeStamps = new List(); + foreach (var keyname in keys) + { + 
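// Record one timestamp per key up front; the same (older) timestamps are re-added further down to verify that overriding existing samples does not raise an error. +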
oldTimeStamps.Add(DateTime.UtcNow); + } + + var sequence = new List<(string, TimeStamp, double)>(keys.Length); + foreach (var keyname in keys) + { + sequence.Add((keyname, DateTime.UtcNow, 1.1)); + } + + await db.TS().MAddAsync(sequence); + sequence.Clear(); + + // Override the same events should not throw an error + for (var i = 0; i < keys.Length; i++) + { + sequence.Add((keys[i], oldTimeStamps[i], 1.1)); + } + + var response = await db.TS().MAddAsync(sequence); + + Assert.Equal(oldTimeStamps.Count, response.Count); + for (int i = 0; i < response.Count; i++) + { + Assert.Equal(oldTimeStamps[i], response[i]); + } + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMGet.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMGet.cs new file mode 100644 index 00000000..7e2b40ba --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMGet.cs @@ -0,0 +1,77 @@ +using System; +using System.Collections.Generic; +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestMGet : AbstractNRedisStackTest, IDisposable + { + + private readonly string[] keys = { "MGET_TESTS_1", "MGET_TESTS_2" }; + + public TestMGet(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + foreach (string key in keys) + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + } + + [Fact] + public void TestMGetQuery() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + var label1 = new TimeSeriesLabel("MGET_TESTS_1", "value"); + var label2 = new TimeSeriesLabel("MGET_TESTS_2", "value2"); + var labels1 = new List { label1, label2 }; + var labels2 = new List { label1 }; + + TimeStamp ts1 = db.TS().Add(keys[0], "*", 1.1, labels: labels1); + TimeSeriesTuple tuple1 = new TimeSeriesTuple(ts1, 1.1); + TimeStamp ts2 = db.TS().Add(keys[1], "*", 2.2, labels: labels2); + TimeSeriesTuple tuple2 = new TimeSeriesTuple(ts2, 2.2); + var results = db.TS().MGet(new List { "MGET_TESTS_1=value" }); + Assert.Equal(2, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(tuple1, results[0].value); + Assert.Equal(new List(), results[0].labels); + Assert.Equal(keys[1], results[1].key); + Assert.Equal(tuple2, results[1].value); + Assert.Equal(new List(), results[1].labels); + + } + + [Fact] + public void TestMGetQueryWithLabels() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + var label1 = new TimeSeriesLabel("MGET_TESTS_1", "value"); + var label2 = new TimeSeriesLabel("MGET_TESTS_2", "value2"); + var labels1 = new List { label1, label2 }; + var labels2 = new List { label1 }; + + TimeStamp ts1 = db.TS().Add(keys[0], "*", 1.1, labels: labels1); + TimeSeriesTuple tuple1 = new TimeSeriesTuple(ts1, 1.1); + TimeStamp ts2 = db.TS().Add(keys[1], "*", 2.2, labels: labels2); + TimeSeriesTuple tuple2 = new TimeSeriesTuple(ts2, 2.2); + + var results = db.TS().MGet(new List { "MGET_TESTS_1=value" }, withLabels: true); + Assert.Equal(2, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(tuple1, results[0].value); + Assert.Equal(labels1, results[0].labels); + Assert.Equal(keys[1], results[1].key); + Assert.Equal(tuple2, results[1].value); + Assert.Equal(labels2, results[1].labels); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMGetAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMGetAsync.cs new file mode 100644 index 00000000..ea56af3b --- 
/dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMGetAsync.cs @@ -0,0 +1,67 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestMGetAsync : AbstractNRedisStackTest + { + public TestMGetAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestMGetQuery() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + var label1 = new TimeSeriesLabel(keys[0], "value"); + var label2 = new TimeSeriesLabel(keys[1], "value2"); + var labels1 = new List { label1, label2 }; + var labels2 = new List { label1 }; + + var ts1 = await db.TS().AddAsync(keys[0], "*", 1.1, labels: labels1); + var tuple1 = new TimeSeriesTuple(ts1, 1.1); + var ts2 = await db.TS().AddAsync(keys[1], "*", 2.2, labels: labels2); + var tuple2 = new TimeSeriesTuple(ts2, 2.2); + + var results = await db.TS().MGetAsync(new List { $"{keys[0]}=value" }); + Assert.Equal(2, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(tuple1, results[0].value); + Assert.Equal(new List(), results[0].labels); + Assert.Equal(keys[1], results[1].key); + Assert.Equal(tuple2, results[1].value); + Assert.Equal(new List(), results[1].labels); + } + + [Fact] + public async Task TestMGetQueryWithLabels() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + + var label1 = new TimeSeriesLabel(keys[0], "value"); + var label2 = new TimeSeriesLabel(keys[1], "value2"); + var labels1 = new List { label1, label2 }; + var labels2 = new List { label1 }; + + var ts1 = await db.TS().AddAsync(keys[0], "*", 1.1, labels: labels1); + var tuple1 = new TimeSeriesTuple(ts1, 1.1); + var ts2 = await db.TS().AddAsync(keys[1], "*", 2.2, labels: labels2); + var tuple2 = new TimeSeriesTuple(ts2, 2.2); + + var results = await db.TS().MGetAsync(new List { $"{keys[0]}=value" }, withLabels: true); + Assert.Equal(2, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(tuple1, results[0].value); + Assert.Equal(labels1, results[0].labels); + Assert.Equal(keys[1], results[1].key); + Assert.Equal(tuple2, results[1].value); + Assert.Equal(labels2, results[1].labels); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRange.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRange.cs new file mode 100644 index 00000000..e1cd6ebf --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRange.cs @@ -0,0 +1,309 @@ +using System; +using System.Collections.Generic; +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using StackExchange.Redis; +using Xunit; +using NRedisStack.RedisStackCommands; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestMRange : AbstractNRedisStackTest, IDisposable + { + private readonly string[] keys = { "MRANGE_TESTS_1", "MRANGE_TESTS_2" }; + + public TestMRange(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + foreach (string key in keys) + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + } + + private List CreateData(IDatabase db, int timeBucket) + { + var tuples = new List(); + + for (int i = 0; i < 10; i++) + { + TimeStamp ts = new TimeStamp(i * timeBucket); + foreach (var key in keys) + { + db.TS().Add(key, ts, i); + } + tuples.Add(new TimeSeriesTuple(ts, i)); + } + return tuples; + } + + [Fact] + 
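// The MRANGE tests below use CreateData to add ten samples per series, spaced timeBucket (50) apart, and then query them back with different option combinations. +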
public void TestSimpleMRange() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel("MRANGEkey", "MRANGEvalue"); + var labels = new List { label }; + foreach (string key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, 50); + var results = db.TS().MRange("-", "+", new List { "MRANGEkey=MRANGEvalue" }); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public void TestMRangeWithLabels() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel("key", "MRangeWithLabels"); + var labels = new List { label }; + foreach (string key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, 50); + var results = db.TS().MRange("-", "+", new List { "key=MRangeWithLabels" }, withLabels: true); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(labels, results[i].labels); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public void TestMRangeSelectLabels() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label1 = new TimeSeriesLabel("key", "MRangeSelectLabels"); + TimeSeriesLabel[] labels = new TimeSeriesLabel[] { new TimeSeriesLabel("team", "CTO"), new TimeSeriesLabel("team", "AUT") }; + for (int i = 0; i < keys.Length; i++) + { + db.TS().Create(keys[i], labels: new List { label1, labels[i] }); + } + + var tuples = CreateData(db, 50); + // selectLabels and withlabels are mutualy exclusive. 
+ var ex = Assert.Throws(() => db.TS().MRange("-", "+", new List { "key=MRangeSelectLabels" }, + withLabels: true, selectLabels: new List { "team" })); + Assert.Equal("withLabels and selectLabels cannot be specified together.", ex.Message); + + var results = db.TS().MRange("-", "+", new List { "key=MRangeSelectLabels" }, selectLabels: new List { "team" }); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(labels[i], results[i].labels[0]); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public void TestMRangeFilter() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel("key", "MRangeFilter"); + var labels = new List { label }; + db.TS().Create(keys[0], labels: labels); + var tuples = CreateData(db, 50); + var results = db.TS().MRange("-", "+", new List { "key=MRangeFilter" }); + Assert.Equal(1, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(0, results[0].labels.Count); + Assert.Equal(tuples, results[0].values); + } + + [Fact] + public void TestMRangeCount() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel("key", "MRangeCount"); + var labels = new List { label }; + foreach (string key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, 50); + long count = 5; + var results = db.TS().MRange("-", "+", new List { "key=MRangeCount" }, count: count); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(tuples.GetRange(0, (int)count), results[i].values); + } + } + + [Fact] + public void TestMRangeAggregation() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel("key", "MRangeAggregation"); + var labels = new List { label }; + foreach (string key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, 50); + var results = db.TS().MRange("-", "+", new List { "key=MRangeAggregation" }, aggregation: TsAggregation.Min, timeBucket: 50); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public void TestMRangeAlign() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel("key", "MRangeAlign"); + var labels = new List { label }; + db.TS().Create(keys[0], labels: labels); + CreateData(db, 50); + var expected = new List { + new TimeSeriesTuple(0,1), + new TimeSeriesTuple(50,1), + new TimeSeriesTuple(100,1) + }; + var results = db.TS().MRange(0, "+", new List { "key=MRangeAlign" }, align: "-", aggregation: TsAggregation.Count, timeBucket: 10, count: 3); + Assert.Equal(1, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(expected, results[0].values); + results = db.TS().MRange(1, 500, new List { "key=MRangeAlign" }, align: "+", aggregation: TsAggregation.Count, timeBucket: 10, count: 1); + Assert.Equal(expected[1], results[0].values[0]); + } + + [Fact] + public void TestMissingFilter() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + 
TimeSeriesLabel label = new TimeSeriesLabel("key", "MissingFilter"); + var labels = new List { label }; + foreach (string key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, 50); + var ex = Assert.Throws(() => db.TS().MRange("-", "+", new List())); + Assert.Equal("There should be at least one filter on MRANGE/MREVRANGE", ex.Message); + } + + [Fact] + public void TestMissingTimeBucket() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel("key", "MissingTimeBucket"); + var labels = new List { label }; + foreach (string key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, 50); + var ex = Assert.Throws(() => db.TS().MRange("-", "+", new List { "key=MissingTimeBucket" }, aggregation: TsAggregation.Avg)); + Assert.Equal("RANGE Aggregation should have timeBucket value", ex.Message); + } + + [Fact] + public void TestMRangeGroupby() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + for (int i = 0; i < keys.Length; i++) + { + var label1 = new TimeSeriesLabel("key", "MRangeGroupby"); + var label2 = new TimeSeriesLabel("group", i.ToString()); + db.TS().Create(keys[i], labels: new List { label1, label2 }); + } + + var tuples = CreateData(db, 50); + var results = db.TS().MRange("-", "+", new List { "key=MRangeGroupby" }, withLabels: true, groupbyTuple: ("group", TsReduce.Min)); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal("group=" + i, results[i].key); + Assert.Equal(new TimeSeriesLabel("group", i.ToString()), results[i].labels[0]); + Assert.Equal(new TimeSeriesLabel("__reducer__", "min"), results[i].labels[1]); + Assert.Equal(new TimeSeriesLabel("__source__", keys[i]), results[i].labels[2]); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public void TestMRangeReduce() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + foreach (var key in keys) + { + var label = new TimeSeriesLabel("key", "MRangeReduce"); + db.TS().Create(key, labels: new List { label }); + } + + var tuples = CreateData(db, 50); + var results = db.TS().MRange("-", "+", new List { "key=MRangeReduce" }, withLabels: true, groupbyTuple: ("key", TsReduce.Sum)); + Assert.Equal(1, results.Count); + Assert.Equal("key=MRangeReduce", results[0].key); + Assert.Equal(new TimeSeriesLabel("key", "MRangeReduce"), results[0].labels[0]); + Assert.Equal(new TimeSeriesLabel("__reducer__", "sum"), results[0].labels[1]); + Assert.Equal(new TimeSeriesLabel("__source__", string.Join(",", keys)), results[0].labels[2]); + for (int i = 0; i < results[0].values.Count; i++) + { + Assert.Equal(tuples[i].Val * 2, results[0].values[i].Val); + } + } + + [Fact] + public void TestMRangeFilterBy() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel("key", "MRangeFilterBy"); + var labels = new List { label }; + foreach (string key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, 50); + var results = db.TS().MRange("-", "+", new List { "key=MRangeFilterBy" }, filterByValue: (0, 2)); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(tuples.GetRange(0, 3), results[i].values); + } + + results = db.TS().MRange("-", "+", new List { "key=MRangeFilterBy" }, filterByTs: new List { 0 }, filterByValue: (0, 2)); + for (int i = 0; i < results.Count; i++) + { + 
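// Combining filterByTs { 0 } with filterByValue (0, 2) should leave only the sample at timestamp 0. +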
Assert.Equal(tuples.GetRange(0, 1), results[i].values); + } + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRangeAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRangeAsync.cs new file mode 100644 index 00000000..09aef8ea --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRangeAsync.cs @@ -0,0 +1,322 @@ +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using StackExchange.Redis; +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; +using NRedisStack.RedisStackCommands; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestMRangeAsync : AbstractNRedisStackTest + { + public TestMRangeAsync(RedisFixture redisFixture) : base(redisFixture) { } + + private async Task> CreateData(IDatabase db, string[] keys, int timeBucket) + { + var tuples = new List(); + + for (var i = 0; i < 10; i++) + { + var ts = new TimeStamp(i * timeBucket); + foreach (var key in keys) + { + await db.TS().AddAsync(key, ts, i); + } + tuples.Add(new TimeSeriesTuple(ts, i)); + } + + return tuples; + } + + [Fact] + public async Task TestSimpleMRange() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var results = db.TS().MRange("-", "+", new List { $"{keys[0]}=value" }); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public async Task TestMRangeWithLabels() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }, withLabels: true); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(labels, results[i].labels); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public async Task TestMRangeSelectLabels() + { + var keys = CreateKeyNames(2); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label1 = new TimeSeriesLabel(keys[0], "value"); + TimeSeriesLabel[] labels = new TimeSeriesLabel[] { new TimeSeriesLabel("team", "CTO"), new TimeSeriesLabel("team", "AUT") }; + for (int i = 0; i < keys.Length; i++) + { + await db.TS().CreateAsync(keys[i], labels: new List { label1, labels[i] }); + } + + var tuples = await CreateData(db, keys, 50); + // selectLabels and withlabels are mutualy exclusive. 
+ var ex = await Assert.ThrowsAsync(async () => + { + await db.TS().MRangeAsync("-", "+", + new List { "key=MRangeSelectLabels" }, + withLabels: true, selectLabels: new List { "team" }); + }); + Assert.Equal("withLabels and selectLabels cannot be specified together.", ex.Message); + + var results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }, selectLabels: new List { "team" }); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(labels[i], results[i].labels[0]); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public async Task TestMRangeFilter() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + await db.TS().CreateAsync(keys[0], labels: labels); + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }); + Assert.Equal(1, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(0, results[0].labels.Count); + Assert.Equal(tuples, results[0].values); + } + + [Fact] + public async Task TestMRangeCount() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var count = 5L; + var results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }, count: count); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(tuples.GetRange(0, (int)count), results[i].values); + } + } + + [Fact] + public async Task TestMRangeAggregation() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }, aggregation: TsAggregation.Min, timeBucket: 50); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public async Task TestMRangeAlign() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + db.TS().Create(keys[0], labels: labels); + await CreateData(db, keys, 50); + var expected = new List { + new TimeSeriesTuple(0,1), + new TimeSeriesTuple(50,1), + new TimeSeriesTuple(100,1) + }; + var results = await db.TS().MRangeAsync(0, "+", new List { $"{keys[0]}=value" }, align: "-", aggregation: TsAggregation.Count, timeBucket: 10, count: 3); + Assert.Equal(1, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(expected, results[0].values); + results = await db.TS().MRangeAsync(0, 500, new List { $"{keys[0]}=value" }, align: "+", aggregation: 
TsAggregation.Count, timeBucket: 10, count: 1); + Assert.Equal(expected[0], results[0].values[0]); + } + + [Fact] + public async Task TestMissingFilter() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var ex = await Assert.ThrowsAsync(async () => await db.TS().MRangeAsync("-", "+", new List())); + Assert.Equal("There should be at least one filter on MRANGE/MREVRANGE", ex.Message); + } + + [Fact] + public async Task TestMissingTimeBucket() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var ex = await Assert.ThrowsAsync(async () => + { + await db.TS().MRangeAsync("-", "+", + filter: new List() { $"key=value" }, + aggregation: TsAggregation.Avg); + }); + Assert.Equal("RANGE Aggregation should have timeBucket value", ex.Message); + } + + [Fact] + public async Task TestMRangeGroupby() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + for (int i = 0; i < keys.Length; i++) + { + var label1 = new TimeSeriesLabel(keys[0], "value"); + var label2 = new TimeSeriesLabel("group", i.ToString()); + await db.TS().CreateAsync(keys[i], labels: new List { label1, label2 }); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }, withLabels: true, groupbyTuple: ("group", TsReduce.Min)); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal("group=" + i, results[i].key); + Assert.Equal(new TimeSeriesLabel("group", i.ToString()), results[i].labels[0]); + Assert.Equal(new TimeSeriesLabel("__reducer__", "min"), results[i].labels[1]); + Assert.Equal(new TimeSeriesLabel("__source__", keys[i]), results[i].labels[2]); + Assert.Equal(tuples, results[i].values); + } + } + + [Fact] + public async Task TestMRangeReduce() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + foreach (var key in keys) + { + var label = new TimeSeriesLabel(keys[0], "value"); + await db.TS().CreateAsync(key, labels: new List { label }); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }, withLabels: true, groupbyTuple: (keys[0], TsReduce.Sum)); + Assert.Equal(1, results.Count); + Assert.Equal($"{keys[0]}=value", results[0].key); + Assert.Equal(new TimeSeriesLabel(keys[0], "value"), results[0].labels[0]); + Assert.Equal(new TimeSeriesLabel("__reducer__", "sum"), results[0].labels[1]); + Assert.Equal(new TimeSeriesLabel("__source__", string.Join(",", keys)), results[0].labels[2]); + for (int i = 0; i < results[0].values.Count; i++) + { + Assert.Equal(tuples[i].Val * 2, results[0].values[i].Val); + } + } + + [Fact] + public async Task TestMRangeFilterBy() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + 
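// filterByValue (0, 2) keeps the first three samples (values 0..2); adding filterByTs { 0 } below narrows the result to the single sample at timestamp 0. +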
foreach (string key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }, filterByValue: (0, 2)); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(tuples.GetRange(0, 3), results[i].values); + } + + results = await db.TS().MRangeAsync("-", "+", new List { $"{keys[0]}=value" }, filterByTs: new List { 0 }, filterByValue: (0, 2)); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(tuples.GetRange(0, 1), results[i].values); + } + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRevRange.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRevRange.cs new file mode 100644 index 00000000..460a0da4 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRevRange.cs @@ -0,0 +1,309 @@ +using System; +using System.Collections.Generic; +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using StackExchange.Redis; +using Xunit; +using NRedisStack.RedisStackCommands; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestMRevRange : AbstractNRedisStackTest + { + public TestMRevRange(RedisFixture redisFixture) : base(redisFixture) { } + + private List CreateData(IDatabase db, string[] keys, int timeBucket) + { + var tuples = new List(); + + for (var i = 0; i < 10; i++) + { + var ts = new TimeStamp(i * timeBucket); + foreach (var key in keys) + { + db.TS().Add(key, ts, i); + + } + tuples.Add(new TimeSeriesTuple(ts, i)); + } + return tuples; + } + + [Fact] + public void TestSimpleMRevRange() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, keys, 50); + var results = db.TS().MRevRange("-", "+", new List { $"{keys[0]}=value" }); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public void TestMRevRangeWithLabels() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, keys, 50); + var results = db.TS().MRevRange("-", "+", new List { $"{keys[0]}=value" }, withLabels: true); + + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(labels, results[i].labels); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public void TestMRevRangeSelectLabels() + { + var keys = CreateKeyNames(2); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label1 = new TimeSeriesLabel("key", "MRangeSelectLabels"); + TimeSeriesLabel[] labels = new TimeSeriesLabel[] { new TimeSeriesLabel("team", "CTO"), new TimeSeriesLabel("team", "AUT") }; + for (int i = 0; i < keys.Length; i++) + { + db.TS().Create(keys[i], labels: new List { label1, labels[i] }); + } + + var tuples = CreateData(db, keys, 50); + var results = db.TS().MRevRange("-", "+", new List { 
"key=MRangeSelectLabels" }, selectLabels: new List { "team" }); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(labels[i], results[i].labels[0]); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public void TestMRevRangeFilter() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + db.TS().Create(keys[0], labels: labels); + var tuples = CreateData(db, keys, 50); + var results = db.TS().MRevRange("-", "+", new List { $"{keys[0]}=value" }); + Assert.Equal(1, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(0, results[0].labels.Count); + Assert.Equal(ReverseData(tuples), results[0].values); + } + + [Fact] + public void TestMRevRangeCount() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, keys, 50); + var count = 5L; + var results = db.TS().MRevRange("-", "+", new List { $"{keys[0]}=value" }, count: count); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(ReverseData(tuples).GetRange(0, (int)count), results[i].values); + } + } + + [Fact] + public void TestMRevRangeAggregation() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, keys, 50); + var results = db.TS().MRevRange("-", "+", new List { $"{keys[0]}=value" }, aggregation: TsAggregation.Min, timeBucket: 50); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public void TestMRevRangeAlign() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + db.TS().Create(keys[0], labels: labels); + CreateData(db, keys, 50); + var expected = new List { + new TimeSeriesTuple(450,1), + new TimeSeriesTuple(400,1), + new TimeSeriesTuple(350,1) + }; + var results = db.TS().MRevRange(0, "+", new List { $"{keys[0]}=value" }, align: "-", aggregation: TsAggregation.Count, timeBucket: 10, count: 3); + Assert.Equal(1, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(expected, results[0].values); + results = db.TS().MRevRange(0, 500, new List { $"{keys[0]}=value" }, align: "+", aggregation: TsAggregation.Count, timeBucket: 10, count: 1); + Assert.Equal(expected[0], results[0].values[0]); + } + + [Fact] + public void TestMissingFilter() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + db.TS().Create(key, 
labels: labels); + } + + var tuples = CreateData(db, keys, 50); + var ex = Assert.Throws(() => db.TS().MRevRange("-", "+", new List())); + Assert.Equal("There should be at least one filter on MRANGE/MREVRANGE", ex.Message); + } + + [Fact] + public void TestMissingTimeBucket() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, keys, 50); + var ex = Assert.Throws(() => db.TS().MRevRange("-", "+", new List { "key=MissingTimeBucket" }, aggregation: TsAggregation.Avg)); + Assert.Equal("RANGE Aggregation should have timeBucket value", ex.Message); + } + + [Fact] + public void TestMRevRangeGroupby() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + for (int i = 0; i < keys.Length; i++) + { + var label1 = new TimeSeriesLabel(keys[0], "value"); + var label2 = new TimeSeriesLabel("group", i.ToString()); + db.TS().Create(keys[i], labels: new List { label1, label2 }); + } + + var tuples = CreateData(db, keys, 50); + var results = db.TS().MRevRange("-", "+", new List { $"{keys[0]}=value" }, withLabels: true, groupbyTuple: ("group", TsReduce.Min)); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal("group=" + i, results[i].key); + Assert.Equal(new TimeSeriesLabel("group", i.ToString()), results[i].labels[0]); + Assert.Equal(new TimeSeriesLabel("__reducer__", "min"), results[i].labels[1]); + Assert.Equal(new TimeSeriesLabel("__source__", keys[i]), results[i].labels[2]); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public void TestMRevRangeReduce() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + foreach (var key in keys) + { + var label = new TimeSeriesLabel(keys[0], "value"); + db.TS().Create(key, labels: new List { label }); + } + + var tuples = CreateData(db, keys, 50); + var results = db.TS().MRevRange("-", "+", new List { $"{keys[0]}=value" }, withLabels: true, groupbyTuple: (keys[0], TsReduce.Sum)); + Assert.Equal(1, results.Count); + Assert.Equal($"{keys[0]}=value", results[0].key); + Assert.Equal(new TimeSeriesLabel(keys[0], "value"), results[0].labels[0]); + Assert.Equal(new TimeSeriesLabel("__reducer__", "sum"), results[0].labels[1]); + Assert.Equal(new TimeSeriesLabel("__source__", string.Join(",", keys)), results[0].labels[2]); + tuples = ReverseData(tuples); + for (int i = 0; i < results[0].values.Count; i++) + { + Assert.Equal(tuples[i].Val * 2, results[0].values[i].Val); + } + } + + [Fact] + public void TestMRevRangeFilterBy() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (string key in keys) + { + db.TS().Create(key, labels: labels); + } + + var tuples = CreateData(db, keys, 50); + var results = db.TS().MRevRange("-", "+", new List { "key=MRangeFilterBy" }, filterByValue: (0, 2)); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(ReverseData(tuples.GetRange(0, 3)), results[i].values); + } + + results = db.TS().MRevRange("-", "+", new List { "key=MRangeFilterBy" }, filterByTs: new List { 0 }, filterByValue: (0, 2)); + for (int i = 0; i < results.Count; i++) + { + 
Assert.Equal(ReverseData(tuples.GetRange(0, 1)), results[i].values); + } + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRevRangeAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRevRangeAsync.cs new file mode 100644 index 00000000..d8e95fb4 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestMRevRangeAsync.cs @@ -0,0 +1,314 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using StackExchange.Redis; +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using Xunit; +using NRedisStack.RedisStackCommands; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestMRevRangeAsync : AbstractNRedisStackTest + { + public TestMRevRangeAsync(RedisFixture redisFixture) : base(redisFixture) { } + + private async Task> CreateData(IDatabase db, string[] keys, int timeBucket) + { + var tuples = new List(); + + for (var i = 0; i < 10; i++) + { + var ts = new TimeStamp(i * timeBucket); + foreach (var key in keys) + { + await db.TS().AddAsync(key, ts, i); + } + tuples.Add(new TimeSeriesTuple(ts, i)); + } + + return tuples; + } + + [Fact] + public async Task TestSimpleMRevRange() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public async Task TestMRevRangeWithLabels() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }, withLabels: true); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(labels, results[i].labels); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public async Task TestMRevRangeSelectLabels() + { + var keys = CreateKeyNames(2); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label1 = new TimeSeriesLabel(keys[0], "value"); + TimeSeriesLabel[] labels = new TimeSeriesLabel[] { new TimeSeriesLabel("team", "CTO"), new TimeSeriesLabel("team", "AUT") }; + for (int i = 0; i < keys.Length; i++) + { + await db.TS().CreateAsync(keys[i], labels: new List { label1, labels[i] }); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }, selectLabels: new List { "team" }); + Assert.Equal(keys.Length, results.Count); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(labels[i], results[i].labels[0]); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public async Task TestMRevRangeFilter() + { + var keys 
= CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + await db.TS().CreateAsync(keys[0], labels: labels); + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }); + Assert.Equal(1, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(0, results[0].labels.Count); + Assert.Equal(ReverseData(tuples), results[0].values); + } + + [Fact] + public async Task TestMRevRangeCount() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var count = 5L; + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }, count: count); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(ReverseData(tuples).GetRange(0, (int)count), results[i].values); + } + } + + [Fact] + public async Task TestMRangeAggregation() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }, aggregation: TsAggregation.Min, timeBucket: 50); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal(keys[i], results[i].key); + Assert.Equal(0, results[i].labels.Count); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public async Task TestMRevRangeAlign() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + db.TS().Create(keys[0], labels: labels); + await CreateData(db, keys, 50); + var expected = new List { + new TimeSeriesTuple(450,1), + new TimeSeriesTuple(400,1), + new TimeSeriesTuple(350,1) + }; + var results = await db.TS().MRevRangeAsync(0, "+", new List { $"{keys[0]}=value" }, align: "-", aggregation: TsAggregation.Count, timeBucket: 10, count: 3); + Assert.Equal(1, results.Count); + Assert.Equal(keys[0], results[0].key); + Assert.Equal(expected, results[0].values); + results = await db.TS().MRevRangeAsync(0, 500, new List { $"{keys[0]}=value" }, align: "+", aggregation: TsAggregation.Count, timeBucket: 10, count: 1); + Assert.Equal(expected[0], results[0].values[0]); + } + + [Fact] + public async Task TestMissingFilter() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var ex = await Assert.ThrowsAsync(async () => await db.TS().MRevRangeAsync("-", "+", new List())); + Assert.Equal("There should be at least one filter on 
MRANGE/MREVRANGE", ex.Message); + } + + [Fact] + public async Task TestMissingTimeBucket() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (var key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var ex = await Assert.ThrowsAsync(async () => + { + await db.TS().MRevRangeAsync("-", "+", + filter: new List() { $"key=value" }, + aggregation: TsAggregation.Avg); + }); + Assert.Equal("RANGE Aggregation should have timeBucket value", ex.Message); + } + + [Fact] + public async Task TestMRevRangeGroupby() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + for (int i = 0; i < keys.Length; i++) + { + var label1 = new TimeSeriesLabel(keys[0], "value"); + var label2 = new TimeSeriesLabel("group", i.ToString()); + await db.TS().CreateAsync(keys[i], labels: new List { label1, label2 }); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }, withLabels: true, groupbyTuple: ("group", TsReduce.Min)); + Assert.Equal(keys.Length, results.Count); + for (var i = 0; i < results.Count; i++) + { + Assert.Equal("group=" + i, results[i].key); + Assert.Equal(new TimeSeriesLabel("group", i.ToString()), results[i].labels[0]); + Assert.Equal(new TimeSeriesLabel("__reducer__", "min"), results[i].labels[1]); + Assert.Equal(new TimeSeriesLabel("__source__", keys[i]), results[i].labels[2]); + Assert.Equal(ReverseData(tuples), results[i].values); + } + } + + [Fact] + public async Task TestMRevRangeReduce() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + foreach (var key in keys) + { + var label = new TimeSeriesLabel(keys[0], "value"); + await db.TS().CreateAsync(key, labels: new List { label }); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }, withLabels: true, groupbyTuple: (keys[0], TsReduce.Sum)); + Assert.Equal(1, results.Count); + Assert.Equal($"{keys[0]}=value", results[0].key); + Assert.Equal(new TimeSeriesLabel(keys[0], "value"), results[0].labels[0]); + Assert.Equal(new TimeSeriesLabel("__reducer__", "sum"), results[0].labels[1]); + Assert.Equal(new TimeSeriesLabel("__source__", string.Join(",", keys)), results[0].labels[2]); + tuples = ReverseData(tuples); + for (int i = 0; i < results[0].values.Count; i++) + { + Assert.Equal(tuples[i].Val * 2, results[0].values[i].Val); + } + } + + [Fact] + public async Task TestMRevRangeFilterBy() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + TimeSeriesLabel label = new TimeSeriesLabel(keys[0], "value"); + var labels = new List { label }; + foreach (string key in keys) + { + await db.TS().CreateAsync(key, labels: labels); + } + + var tuples = await CreateData(db, keys, 50); + var results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }, filterByValue: (0, 2)); + for (int i = 0; i < results.Count; i++) + { + Assert.Equal(ReverseData(tuples.GetRange(0, 3)), results[i].values); + } + + results = await db.TS().MRevRangeAsync("-", "+", new List { $"{keys[0]}=value" }, filterByTs: new List { 0 }, filterByValue: (0, 2)); + for (int i = 0; i < results.Count; i++) + { + 
Assert.Equal(ReverseData(tuples.GetRange(0, 1)), results[i].values); + } + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestQueryIndex.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestQueryIndex.cs new file mode 100644 index 00000000..cf23baf2 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestQueryIndex.cs @@ -0,0 +1,39 @@ +using System; +using System.Collections.Generic; +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestQueryIndex : AbstractNRedisStackTest, IDisposable + { + private readonly string[] keys = { "QUERYINDEX_TESTS_1", "QUERYINDEX_TESTS_2" }; + + public TestQueryIndex(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + foreach (var key in keys) + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + } + + [Fact] + public void TestTSQueryIndex() + { + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label1 = new TimeSeriesLabel("QUERYINDEX_TESTS_1", "value"); + var label2 = new TimeSeriesLabel("QUERYINDEX_TESTS_2", "value2"); + var labels1 = new List { label1, label2 }; + var labels2 = new List { label1 }; + + db.TS().Create(keys[0], labels: labels1); + db.TS().Create(keys[1], labels: labels2); + Assert.Equal(keys, db.TS().QueryIndex(new List { "QUERYINDEX_TESTS_1=value" })); + Assert.Equal(new List { keys[0] }, db.TS().QueryIndex(new List { "QUERYINDEX_TESTS_2=value2" })); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestQueryIndexAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestQueryIndexAsync.cs new file mode 100644 index 00000000..7aa949c7 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestQueryIndexAsync.cs @@ -0,0 +1,30 @@ +using NRedisStack.DataTypes; +using NRedisStack.RedisStackCommands; +using System.Collections.Generic; +using System.Threading.Tasks; +using Xunit; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestQueryIndexAsync : AbstractNRedisStackTest + { + public TestQueryIndexAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestTSQueryIndex() + { + var keys = CreateKeyNames(2); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var label1 = new TimeSeriesLabel(keys[0], "value"); + var label2 = new TimeSeriesLabel(keys[1], "value2"); + var labels1 = new List { label1, label2 }; + var labels2 = new List { label1 }; + + await db.TS().CreateAsync(keys[0], labels: labels1); + await db.TS().CreateAsync(keys[1], labels: labels2); + Assert.Equal(keys, db.TS().QueryIndex(new List { $"{keys[0]}=value" })); + Assert.Equal(new List { keys[0] }, db.TS().QueryIndex(new List { $"{keys[1]}=value2" })); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRange.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRange.cs new file mode 100644 index 00000000..4a6392ab --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRange.cs @@ -0,0 +1,129 @@ +using System; +using System.Collections.Generic; +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using StackExchange.Redis; +using Xunit; +using NRedisStack.RedisStackCommands; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestRange : AbstractNRedisStackTest, IDisposable + { + private readonly string key = "RANGE_TESTS"; + + public TestRange(RedisFixture redisFixture) : base(redisFixture) { } + + public void Dispose() + { + 
redisFixture.Redis.GetDatabase().KeyDelete(key);
+        }
+
+        private List<TimeSeriesTuple> CreateData(IDatabase db, int timeBucket)
+        {
+            var tuples = new List<TimeSeriesTuple>();
+            for (int i = 0; i < 10; i++)
+            {
+                TimeStamp ts = db.TS().Add(key, i * timeBucket, i);
+                tuples.Add(new TimeSeriesTuple(ts, i));
+            }
+            return tuples;
+        }
+
+        [Fact]
+        public void TestSimpleRange()
+        {
+            IDatabase db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, 50);
+            Assert.Equal(tuples, db.TS().Range(key, "-", "+"));
+        }
+
+        [Fact]
+        public void TestRangeCount()
+        {
+            IDatabase db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, 50);
+            Assert.Equal(tuples.GetRange(0, 5), db.TS().Range(key, "-", "+", count: 5));
+        }
+
+        [Fact]
+        public void TestRangeAggregation()
+        {
+            IDatabase db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, 50);
+            Assert.Equal(tuples, db.TS().Range(key, "-", "+", aggregation: TsAggregation.Min, timeBucket: 50));
+        }
+
+        [Fact]
+        public void TestRangeAlign()
+        {
+            IDatabase db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = new List<TimeSeriesTuple>()
+            {
+                new TimeSeriesTuple(1, 10),
+                new TimeSeriesTuple(3, 5),
+                new TimeSeriesTuple(11, 10),
+                new TimeSeriesTuple(21, 11)
+            };
+
+            foreach (var tuple in tuples)
+            {
+                db.TS().Add(key, tuple.Time, tuple.Val);
+            }
+
+            // Align start
+            var resStart = new List<TimeSeriesTuple>()
+            {
+                new TimeSeriesTuple(1, 2),
+                new TimeSeriesTuple(11, 1),
+                new TimeSeriesTuple(21, 1)
+            };
+            Assert.Equal(resStart, db.TS().Range(key, 1, 30, align: "-", aggregation: TsAggregation.Count, timeBucket: 10));
+
+            // Align end
+            var resEnd = new List<TimeSeriesTuple>()
+            {
+                new TimeSeriesTuple(0, 2),
+                new TimeSeriesTuple(10, 1),
+                new TimeSeriesTuple(20, 1)
+            };
+            Assert.Equal(resEnd, db.TS().Range(key, 1, 30, align: "+", aggregation: TsAggregation.Count, timeBucket: 10));
+
+            // Align 1
+            Assert.Equal(resStart, db.TS().Range(key, 1, 30, align: 1, aggregation: TsAggregation.Count, timeBucket: 10));
+        }
+
+        [Fact]
+        public void TestMissingTimeBucket()
+        {
+            IDatabase db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, 50);
+            var ex = Assert.Throws<ArgumentException>(() => db.TS().Range(key, "-", "+", aggregation: TsAggregation.Avg));
+            Assert.Equal("RANGE Aggregation should have timeBucket value", ex.Message);
+        }
+
+        [Fact]
+        public void TestFilterBy()
+        {
+            IDatabase db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, 50);
+
+            var res = db.TS().Range(key, "-", "+", filterByValue: (0, 2)); // The first 3 tuples
+            Assert.Equal(3, res.Count);
+            Assert.Equal(tuples.GetRange(0, 3), res);
+
+            var filterTs = new List<TimeStamp> { 0, 50, 100 }; // Also the first 3 tuples
+            res = db.TS().Range(key, "-", "+", filterByTs: filterTs);
+            Assert.Equal(tuples.GetRange(0, 3), res);
+
+            res = db.TS().Range(key, "-", "+", filterByTs: filterTs, filterByValue: (2, 5)); // The third tuple
+            Assert.Equal(tuples.GetRange(2, 1), res);
+        }
+    }
+}
diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRangeAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRangeAsync.cs
new file mode 100644
index 00000000..9083ea8a
--- /dev/null
+++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRangeAsync.cs
@@ -0,0 +1,129 @@
+using NRedisStack.Literals.Enums;
+using NRedisStack.DataTypes;
+using StackExchange.Redis;
+using System;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+using Xunit;
+using NRedisStack.RedisStackCommands;
+
+namespace 
NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestRangeAsync : AbstractNRedisStackTest + { + public TestRangeAsync(RedisFixture redisFixture) : base(redisFixture) { } + + private async Task> CreateData(IDatabase db, string key, int timeBucket) + { + var tuples = new List(); + for (var i = 0; i < 10; i++) + { + var ts = await db.TS().AddAsync(key, i * timeBucket, i); + tuples.Add(new TimeSeriesTuple(ts, i)); + } + return tuples; + } + + [Fact] + public async Task TestSimpleRange() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + Assert.Equal(tuples, await db.TS().RangeAsync(key, "-", "+")); + } + + [Fact] + public async Task TestRangeCount() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + Assert.Equal(tuples.GetRange(0, 5), await db.TS().RangeAsync(key, "-", "+", count: 5)); + } + + [Fact] + public async Task TestRangeAggregation() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + Assert.Equal(tuples, await db.TS().RangeAsync(key, "-", "+", aggregation: TsAggregation.Min, timeBucket: 50)); + } + + [Fact] + public async Task TestRangeAlign() + { + var key = CreateKeyName(); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = new List() + { + new TimeSeriesTuple(1, 10), + new TimeSeriesTuple(3, 5), + new TimeSeriesTuple(11, 10), + new TimeSeriesTuple(21, 11) + }; + + foreach (var tuple in tuples) + { + await db.TS().AddAsync(key, tuple.Time, tuple.Val); + } + + // Aligh start + var resStart = new List() + { + new TimeSeriesTuple(1, 2), + new TimeSeriesTuple(11, 1), + new TimeSeriesTuple(21, 1) + }; + Assert.Equal(resStart, await db.TS().RangeAsync(key, 1, 30, align: "-", aggregation: TsAggregation.Count, timeBucket: 10)); + + // Aligh end + var resEnd = new List() + { + new TimeSeriesTuple(0, 2), + new TimeSeriesTuple(10, 1), + new TimeSeriesTuple(20, 1) + }; + Assert.Equal(resEnd, await db.TS().RangeAsync(key, 1, 30, align: "+", aggregation: TsAggregation.Count, timeBucket: 10)); + + // Align 1 + Assert.Equal(resStart, await db.TS().RangeAsync(key, 1, 30, align: 1, aggregation: TsAggregation.Count, timeBucket: 10)); + } + + [Fact] + public async Task TestMissingTimeBucket() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + var ex = await Assert.ThrowsAsync(async () => await db.TS().RangeAsync(key, "-", "+", aggregation: TsAggregation.Avg)); + Assert.Equal("RANGE Aggregation should have timeBucket value", ex.Message); + } + + [Fact] + public async Task TestFilterBy() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + + var res = await db.TS().RangeAsync(key, "-", "+", filterByValue: (0, 2)); // The first 3 tuples + Assert.Equal(3, res.Count); + Assert.Equal(tuples.GetRange(0, 3), res); + + var filterTs = new List { 0, 50, 100 }; // Also the first 3 tuples + res = await db.TS().RangeAsync(key, "-", "+", filterByTs: filterTs); + Assert.Equal(tuples.GetRange(0, 3), res); + + res = await db.TS().RangeAsync(key, "-", "+", filterByTs: filterTs, filterByValue: (2, 5)); // The third tuple + Assert.Equal(tuples.GetRange(2, 1), res); + } + } +} diff 
--git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRevRange.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRevRange.cs
new file mode 100644
index 00000000..7a6ee312
--- /dev/null
+++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRevRange.cs
@@ -0,0 +1,129 @@
+using System;
+using System.Collections.Generic;
+using NRedisStack.Literals.Enums;
+using NRedisStack.DataTypes;
+using StackExchange.Redis;
+using Xunit;
+using NRedisStack.RedisStackCommands;
+
+namespace NRedisStack.Tests.TimeSeries.TestAPI
+{
+    public class TestRevRange : AbstractNRedisStackTest
+    {
+        public TestRevRange(RedisFixture redisFixture) : base(redisFixture) { }
+
+        private List<TimeSeriesTuple> CreateData(IDatabase db, string key, int timeBucket)
+        {
+            var tuples = new List<TimeSeriesTuple>();
+            for (var i = 0; i < 10; i++)
+            {
+                var ts = db.TS().Add(key, i * timeBucket, i);
+                tuples.Add(new TimeSeriesTuple(ts, i));
+            }
+            return tuples;
+        }
+
+        [Fact]
+        public void TestSimpleRevRange()
+        {
+            var key = CreateKeyName();
+            var db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, key, 50);
+            Assert.Equal(ReverseData(tuples), db.TS().RevRange(key, "-", "+"));
+        }
+
+        [Fact]
+        public void TestRevRangeCount()
+        {
+            var key = CreateKeyName();
+            var db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, key, 50);
+            Assert.Equal(ReverseData(tuples).GetRange(0, 5), db.TS().RevRange(key, "-", "+", count: 5));
+        }
+
+        [Fact]
+        public void TestRevRangeAggregation()
+        {
+            var key = CreateKeyName();
+            var db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, key, 50);
+            Assert.Equal(ReverseData(tuples), db.TS().RevRange(key, "-", "+", aggregation: TsAggregation.Min, timeBucket: 50));
+        }
+
+        [Fact]
+        public void TestRevRangeAlign()
+        {
+            var key = CreateKeyName();
+            var db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = new List<TimeSeriesTuple>()
+            {
+                new TimeSeriesTuple(1, 10),
+                new TimeSeriesTuple(3, 5),
+                new TimeSeriesTuple(11, 10),
+                new TimeSeriesTuple(21, 11)
+            };
+
+            foreach (var tuple in tuples)
+            {
+                db.TS().Add(key, tuple.Time, tuple.Val);
+            }
+
+            // Align start
+            var resStart = new List<TimeSeriesTuple>()
+            {
+                new TimeSeriesTuple(21, 1),
+                new TimeSeriesTuple(11, 1),
+                new TimeSeriesTuple(1, 2)
+            };
+            Assert.Equal(resStart, db.TS().RevRange(key, 1, 30, align: "-", aggregation: TsAggregation.Count, timeBucket: 10));
+
+            // Align end
+            var resEnd = new List<TimeSeriesTuple>()
+            {
+                new TimeSeriesTuple(20, 1),
+                new TimeSeriesTuple(10, 1),
+                new TimeSeriesTuple(0, 2)
+            };
+            Assert.Equal(resEnd, db.TS().RevRange(key, 1, 30, align: "+", aggregation: TsAggregation.Count, timeBucket: 10));
+
+            // Align 1
+            Assert.Equal(resStart, db.TS().RevRange(key, 1, 30, align: 1, aggregation: TsAggregation.Count, timeBucket: 10));
+        }
+
+        [Fact]
+        public void TestMissingTimeBucket()
+        {
+            var key = CreateKeyName();
+            var db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, key, 50);
+            var ex = Assert.Throws<ArgumentException>(() => db.TS().RevRange(key, "-", "+", aggregation: TsAggregation.Avg));
+            Assert.Equal("RANGE Aggregation should have timeBucket value", ex.Message);
+        }
+
+        [Fact]
+        public void TestFilterBy()
+        {
+            var key = CreateKeyName();
+            var db = redisFixture.Redis.GetDatabase();
+            db.Execute("FLUSHALL");
+            var tuples = CreateData(db, key, 50);
+
+            var res = db.TS().RevRange(key, "-", "+", filterByValue: (0, 2));
+            Assert.Equal(3, res.Count);
+            Assert.Equal(ReverseData(tuples.GetRange(0, 3)), res);
+
+            var filterTs = new 
List { 0, 50, 100 }; + res = db.TS().RevRange(key, "-", "+", filterByTs: filterTs); + Assert.Equal(ReverseData(tuples.GetRange(0, 3)), res); + + res = db.TS().RevRange(key, "-", "+", filterByTs: filterTs, filterByValue: (2, 5)); + Assert.Equal(tuples.GetRange(2, 1), res); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRevRangeAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRevRangeAsync.cs new file mode 100644 index 00000000..651e27cc --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRevRangeAsync.cs @@ -0,0 +1,129 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using StackExchange.Redis; +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using Xunit; +using NRedisStack.RedisStackCommands; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestRevRangeAsync : AbstractNRedisStackTest + { + public TestRevRangeAsync(RedisFixture redisFixture) : base(redisFixture) { } + + private async Task> CreateData(IDatabase db, string key, int timeBucket) + { + var tuples = new List(); + for (var i = 0; i < 10; i++) + { + var ts = await db.TS().AddAsync(key, i * timeBucket, i); + tuples.Add(new TimeSeriesTuple(ts, i)); + } + return tuples; + } + + [Fact] + public async Task TestSimpleRevRange() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + Assert.Equal(ReverseData(tuples), await db.TS().RevRangeAsync(key, "-", "+")); + } + + [Fact] + public async Task TestRevRangeCount() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + Assert.Equal(ReverseData(tuples).GetRange(0, 5), await db.TS().RevRangeAsync(key, "-", "+", count: 5)); + } + + [Fact] + public async Task TestRevRangeAggregation() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + Assert.Equal(ReverseData(tuples), await db.TS().RevRangeAsync(key, "-", "+", aggregation: TsAggregation.Min, timeBucket: 50)); + } + + [Fact] + public async Task TestRevRangeAlign() + { + var key = CreateKeyName(); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = new List() + { + new TimeSeriesTuple(1, 10), + new TimeSeriesTuple(3, 5), + new TimeSeriesTuple(11, 10), + new TimeSeriesTuple(21, 11) + }; + + foreach (var tuple in tuples) + { + await db.TS().AddAsync(key, tuple.Time, tuple.Val); + } + + // Aligh start + var resStart = new List() + { + new TimeSeriesTuple(21, 1), + new TimeSeriesTuple(11, 1), + new TimeSeriesTuple(1, 2) + }; + Assert.Equal(resStart, await db.TS().RevRangeAsync(key, 1, 30, align: "-", aggregation: TsAggregation.Count, timeBucket: 10)); + + // Aligh end + var resEnd = new List() + { + new TimeSeriesTuple(20, 1), + new TimeSeriesTuple(10, 1), + new TimeSeriesTuple(0, 2) + }; + Assert.Equal(resEnd, await db.TS().RevRangeAsync(key, 1, 30, align: "+", aggregation: TsAggregation.Count, timeBucket: 10)); + + // Align 1 + Assert.Equal(resStart, await db.TS().RevRangeAsync(key, 1, 30, align: 1, aggregation: TsAggregation.Count, timeBucket: 10)); + } + + [Fact] + public async Task TestMissingTimeBucket() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + var ex = await Assert.ThrowsAsync(async () => 
await db.TS().RevRangeAsync(key, "-", "+", aggregation: TsAggregation.Avg)); + Assert.Equal("RANGE Aggregation should have timeBucket value", ex.Message); + } + + [Fact] + public async Task TestFilterBy() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + var tuples = await CreateData(db, key, 50); + + var res = await db.TS().RevRangeAsync(key, "-", "+", filterByValue: (0, 2)); + Assert.Equal(3, res.Count); + Assert.Equal(ReverseData(tuples.GetRange(0, 3)), res); + + var filterTs = new List { 0, 50, 100 }; + res = await db.TS().RevRangeAsync(key, "-", "+", filterByTs: filterTs); + Assert.Equal(ReverseData(tuples.GetRange(0, 3)), res); + + res = await db.TS().RevRangeAsync(key, "-", "+", filterByTs: filterTs, filterByValue: (2, 5)); + Assert.Equal(tuples.GetRange(2, 1), res); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRules.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRules.cs new file mode 100644 index 00000000..acfd7c24 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRules.cs @@ -0,0 +1,106 @@ +using System; +using System.Collections.Generic; +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using StackExchange.Redis; +using Xunit; +using NRedisStack.RedisStackCommands; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestRules : AbstractNRedisStackTest, IDisposable + { + private string srcKey = "RULES_TEST_SRC"; + + private Dictionary destKeys; + + public TestRules(RedisFixture redisFixture) : base(redisFixture) + { + + destKeys = new Dictionary + { + { TsAggregation.Avg, "RULES_DEST_" + TsAggregation.Avg }, + { TsAggregation.Count, "RULES_DEST_" + TsAggregation.Count }, + { TsAggregation.First, "RULES_DEST_" + TsAggregation.First }, + { TsAggregation.Last, "RULES_DEST_" + TsAggregation.Last }, + { TsAggregation.Max, "RULES_DEST_" + TsAggregation.Max }, + { TsAggregation.Min, "RULES_DEST_" + TsAggregation.Min }, + { TsAggregation.Range, "RULES_DEST_" + TsAggregation.Range }, + { TsAggregation.StdP, "RULES_DEST_" + TsAggregation.StdP }, + { TsAggregation.StdS, "RULES_DEST_" + TsAggregation.StdS }, + { TsAggregation.Sum, "RULES_DEST_" + TsAggregation.Sum }, + { TsAggregation.VarP, "RULES_DEST_" + TsAggregation.VarP }, + { TsAggregation.VarS, "RULES_DEST_" + TsAggregation.VarS } + }; + } + + public void Dispose() + { + redisFixture.Redis.GetDatabase().KeyDelete(srcKey); + foreach (var key in destKeys.Values) + { + redisFixture.Redis.GetDatabase().KeyDelete(key); + } + } + + [Fact] + public void TestRulesAdditionDeletion() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + db.TS().Create(srcKey); + foreach (var destKey in destKeys.Values) + { + db.TS().Create(destKey); + } + long timeBucket = 50; + var rules = new List(); + var rulesMap = new Dictionary(); + foreach (var aggregation in destKeys.Keys) + { + var rule = new TimeSeriesRule(destKeys[aggregation], timeBucket, aggregation); + rules.Add(rule); + rulesMap[aggregation] = rule; + Assert.True(db.TS().CreateRule(srcKey, rule)); + TimeSeriesInformation info = db.TS().Info(srcKey); + Assert.Equal(rules, info.Rules); + } + foreach (var aggregation in destKeys.Keys) + { + var rule = rulesMap[aggregation]; + rules.Remove(rule); + Assert.True(db.TS().DeleteRule(srcKey, rule.DestKey)); + TimeSeriesInformation info = db.TS().Info(srcKey); + Assert.Equal(rules, info.Rules); + } + } + + [Fact] + public void TestNonExistingSrc() + { + IDatabase db = 
redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + string destKey = "RULES_DEST_" + TsAggregation.Avg; + db.TS().Create(destKey); + TimeSeriesRule rule = new TimeSeriesRule(destKey, 50, TsAggregation.Avg); + var ex = Assert.Throws(() => db.TS().CreateRule(srcKey, rule)); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + ex = Assert.Throws(() => db.TS().DeleteRule(srcKey, destKey)); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + } + + [Fact] + public void TestNonExisitingDestinaion() + { + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + string destKey = "RULES_DEST_" + TsAggregation.Avg; + db.TS().Create(srcKey); + TimeSeriesRule rule = new TimeSeriesRule(destKey, 50, TsAggregation.Avg); + var ex = Assert.Throws(() => db.TS().CreateRule(srcKey, rule)); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + ex = Assert.Throws(() => db.TS().DeleteRule(srcKey, destKey)); + Assert.Equal("ERR TSDB: compaction rule does not exist", ex.Message); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRulesAsync.cs b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRulesAsync.cs new file mode 100644 index 00000000..bae3ff87 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestAPI/TestRulesAsync.cs @@ -0,0 +1,92 @@ +using System; +using System.Linq; +using System.Collections.Generic; +using System.Threading.Tasks; +using StackExchange.Redis; +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using Xunit; +using NRedisStack.RedisStackCommands; + +namespace NRedisStack.Tests.TimeSeries.TestAPI +{ + public class TestRulesAsync : AbstractNRedisStackTest + { + public TestRulesAsync(RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestRulesAdditionDeletion() + { + var key = CreateKeyName(); + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().CreateAsync(key); + var aggregations = (TsAggregation[])Enum.GetValues(typeof(TsAggregation)); + + foreach (var aggregation in aggregations) + { + await db.TS().CreateAsync($"{key}:{aggregation}"); + } + + var timeBucket = 50L; + var rules = new List(); + var rulesMap = new Dictionary(); + foreach (var aggregation in aggregations) + { + var rule = new TimeSeriesRule($"{key}:{aggregation}", timeBucket, aggregation); + rules.Add(rule); + rulesMap[aggregation] = rule; + Assert.True(await db.TS().CreateRuleAsync(key, rule)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(rules, info.Rules); + } + + foreach (var aggregation in aggregations) + { + var rule = rulesMap[aggregation]; + rules.Remove(rule); + Assert.True(await db.TS().DeleteRuleAsync(key, rule.DestKey)); + + var info = await db.TS().InfoAsync(key); + Assert.Equal(rules, info.Rules); + } + + await db.KeyDeleteAsync(aggregations.Select(i => (RedisKey)$"{key}:{i}").ToArray()); + } + + [Fact] + public async Task TestNonExistingSrc() + { + var key = CreateKeyName(); + var aggKey = $"{key}:{TsAggregation.Avg}"; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().CreateAsync(aggKey); + var rule = new TimeSeriesRule(aggKey, 50, TsAggregation.Avg); + var ex = await Assert.ThrowsAsync(async () => await db.TS().CreateRuleAsync(key, rule)); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + + ex = await Assert.ThrowsAsync(async () => await db.TS().DeleteRuleAsync(key, aggKey)); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + + await db.KeyDeleteAsync(aggKey); 
+ } + + [Fact] + public async Task TestNonExisitingDestinaion() + { + var key = CreateKeyName(); + var aggKey = $"{key}:{TsAggregation.Avg}"; + var db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().CreateAsync(key); + var rule = new TimeSeriesRule(aggKey, 50, TsAggregation.Avg); + var ex = await Assert.ThrowsAsync(async () => await db.TS().CreateRuleAsync(key, rule)); + Assert.Equal("ERR TSDB: the key does not exist", ex.Message); + + ex = await Assert.ThrowsAsync(async () => await db.TS().DeleteRuleAsync(key, aggKey)); + Assert.Equal("ERR TSDB: compaction rule does not exist", ex.Message); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesInformation.cs b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesInformation.cs new file mode 100644 index 00000000..b43debe3 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesInformation.cs @@ -0,0 +1,34 @@ +using StackExchange.Redis; +using NRedisStack.DataTypes; +using NRedisStack.Literals.Enums; +using System.Threading.Tasks; +using Xunit; +using NRedisStack.RedisStackCommands; +using NRedisStack.Tests.TimeSeries.TestAPI; +using NRedisStack.Tests; + + +namespace NRedisTimeSeries.Test.TestDataTypes +{ + public class TestInformation : AbstractNRedisStackTest + { + public TestInformation(NRedisStack.Tests.RedisFixture redisFixture) : base(redisFixture) { } + + [Fact] + public async Task TestInformationToStringAsync() + { + string key = CreateKeyName(); + IDatabase db = redisFixture.Redis.GetDatabase(); + db.Execute("FLUSHALL"); + await db.TS().AddAsync(key, "*", 1.1); + await db.TS().AddAsync(key, "*", 1.3, duplicatePolicy: TsDuplicatePolicy.LAST); + TimeSeriesInformation info = await db.TS().InfoAsync(key); + string[] infoProperties = ((string)info).Trim('{').Trim('}').Split(","); + Assert.Equal("\"TotalSamples\":2", infoProperties[0]); + Assert.Equal("\"MemoryUsage\":4184", infoProperties[1]); + Assert.Equal("\"RetentionTime\":0", infoProperties[4]); + Assert.Equal("\"ChunkCount\":1", infoProperties[5]); + Assert.Equal("\"DuplicatePolicy\":null", infoProperties[11]); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesLabel.cs b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesLabel.cs new file mode 100644 index 00000000..44f39e5b --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesLabel.cs @@ -0,0 +1,51 @@ +using System; +using NRedisStack.DataTypes; +using Xunit; + +namespace NRedisTimeSeries.Test.TestDataTypes +{ + public class TestLabel + { + [Fact] + public void TestLabelConstructor() + { + TimeSeriesLabel label = new TimeSeriesLabel("a", "b"); + Assert.Equal("a", label.Key); + Assert.Equal("b", label.Value); + } + + + [Fact] + public void TestLbaelEquals() + { + TimeSeriesLabel label_ab = new TimeSeriesLabel("a", "b"); + TimeSeriesLabel label1 = new TimeSeriesLabel("a", "b"); + TimeSeriesLabel label2 = new TimeSeriesLabel("a", "c"); + TimeSeriesLabel label3 = new TimeSeriesLabel("c", "b"); + + Assert.Equal(label_ab, label1); + Assert.NotEqual(label_ab, label2); + Assert.NotEqual(label_ab, label3); + } + + [Fact] + public void TestLabelHashCode() + { + TimeSeriesLabel label_ab = new TimeSeriesLabel("a", "b"); + TimeSeriesLabel label1 = new TimeSeriesLabel("a", "b"); + TimeSeriesLabel label2 = new TimeSeriesLabel("a", "c"); + TimeSeriesLabel label3 = new TimeSeriesLabel("c", "b"); + + Assert.Equal(label_ab.GetHashCode(), label1.GetHashCode()); + 
Assert.NotEqual(label_ab.GetHashCode(), label2.GetHashCode()); + Assert.NotEqual(label_ab.GetHashCode(), label3.GetHashCode()); + } + + [Fact] + public void TestLabelToString() + { + TimeSeriesLabel label = new TimeSeriesLabel("a", "b"); + Assert.Equal("Key: a, Val:b", (string)label); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesRule.cs b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesRule.cs new file mode 100644 index 00000000..dd6b890a --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesRule.cs @@ -0,0 +1,61 @@ +using System; +using NRedisStack.RedisStackCommands; +using NRedisStack.Literals.Enums; +using NRedisStack.DataTypes; +using Xunit; + +namespace NRedisTimeSeries.Test.TestDataTypes +{ + public class TestTimeSeriesRule + { + public TestTimeSeriesRule() { } + + [Fact] + public void TestRuleConstructor() + { + TimeSeriesRule rule = new TimeSeriesRule("key", 50, TsAggregation.Avg); + Assert.Equal("key", rule.DestKey); + Assert.Equal(TsAggregation.Avg, rule.Aggregation); + Assert.Equal(50, rule.TimeBucket); + } + + [Fact] + public void TestRuleEquals() + { + TimeSeriesRule rule = new TimeSeriesRule("key", 50, TsAggregation.Avg); + + TimeSeriesRule rule1 = new TimeSeriesRule("key", 50, TsAggregation.Avg); + TimeSeriesRule rule2 = new TimeSeriesRule("key2", 50, TsAggregation.Avg); + TimeSeriesRule rule3 = new TimeSeriesRule("key", 51, TsAggregation.Avg); + TimeSeriesRule rule4 = new TimeSeriesRule("key", 50, TsAggregation.Count); + + Assert.Equal(rule, rule1); + Assert.NotEqual(rule, rule2); + Assert.NotEqual(rule, rule3); + Assert.NotEqual(rule, rule4); + } + + [Fact] + public void TestRuleHashCode() + { + TimeSeriesRule rule = new TimeSeriesRule("key", 50, TsAggregation.Avg); + + TimeSeriesRule rule1 = new TimeSeriesRule("key", 50, TsAggregation.Avg); + TimeSeriesRule rule2 = new TimeSeriesRule("key2", 50, TsAggregation.Avg); + TimeSeriesRule rule3 = new TimeSeriesRule("key", 51, TsAggregation.Avg); + TimeSeriesRule rule4 = new TimeSeriesRule("key", 50, TsAggregation.Count); + + Assert.Equal(rule.GetHashCode(), rule1.GetHashCode()); + Assert.NotEqual(rule.GetHashCode(), rule2.GetHashCode()); + Assert.NotEqual(rule.GetHashCode(), rule3.GetHashCode()); + Assert.NotEqual(rule.GetHashCode(), rule4.GetHashCode()); + } + + [Fact] + public void TestRuleToString() + { + TimeSeriesRule rule = new TimeSeriesRule("key", 50, TsAggregation.Avg); + Assert.Equal("DestinationKey: key, TimeBucket: 50, Aggregation: AVG", (string)rule); + } + } +} diff --git a/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesTuple.cs b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesTuple.cs new file mode 100644 index 00000000..c2195b84 --- /dev/null +++ b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeSeriesTuple.cs @@ -0,0 +1,44 @@ +using System; +using NRedisStack.DataTypes; +using Xunit; + +namespace NRedisTimeSeries.Test +{ + public class TestTimeSeriesTuple + { + [Fact] + public void TestTupleConstructor() + { + TimeSeriesTuple tuple = new TimeSeriesTuple(1, 1.1); + Assert.Equal(1, tuple.Time); + Assert.Equal(1.1, tuple.Val); + } + + [Fact] + public void TestTupleEqual() + { + TimeSeriesTuple tuple1 = new TimeSeriesTuple(1, 1.1); + TimeSeriesTuple tuple1_1 = new TimeSeriesTuple(1, 1.1); + TimeSeriesTuple tuple1_2 = new TimeSeriesTuple(2, 2.2); + Assert.Equal(tuple1, tuple1_1); + Assert.NotEqual(tuple1, tuple1_2); + } + + [Fact] + public void TestTupleHashCode() + { + TimeSeriesTuple 
tuple1 = new TimeSeriesTuple(1, 1.1);
+            TimeSeriesTuple tuple1_1 = new TimeSeriesTuple(1, 1.1);
+            TimeSeriesTuple tuple1_2 = new TimeSeriesTuple(2, 2.2);
+            Assert.Equal(tuple1.GetHashCode(), tuple1_1.GetHashCode());
+            Assert.NotEqual(tuple1.GetHashCode(), tuple1_2.GetHashCode());
+        }
+
+        [Fact]
+        public void TestTupleToString()
+        {
+            TimeSeriesTuple tuple = new TimeSeriesTuple(1, 1.1);
+            Assert.Equal("Time: 1, Val:1.1", (string)tuple);
+        }
+    }
+}
diff --git a/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeStamp.cs b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeStamp.cs
new file mode 100644
index 00000000..ccda7e6e
--- /dev/null
+++ b/tests/NRedisStack.Tests/TimeSeries/TestDataTypes/TestTimeStamp.cs
@@ -0,0 +1,33 @@
+using System;
+using NRedisStack.DataTypes;
+using Xunit;
+namespace NRedisTimeSeries.Test
+{
+    public class TestTimeStamp
+    {
+
+        [Fact]
+        public void TestTimeStampImplicitCast()
+        {
+            TimeStamp ts = 1;
+            Assert.Equal(1, ts);
+
+            ts = "+";
+            Assert.Equal("+", ts);
+
+            ts = "*";
+            Assert.Equal("*", ts);
+
+            ts = "-";
+            Assert.Equal("-", ts);
+
+            var ex = Assert.Throws(() => ts = "hi");
+            Assert.Equal("The string hi cannot be used", ex.Message);
+
+            DateTime now = DateTime.UtcNow;
+            ts = now;
+            Assert.Equal(now, ts);
+
+        }
+    }
+}
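For reviewers treating the new Range/RevRange/MRange tests above as de facto usage documentation, here is a minimal sketch of the range-query surface they exercise. It assumes a locally reachable Redis Stack instance and the `db.TS()` extension used throughout this diff; the connection string, key name, label, and sample values are illustrative and not part of the change.

```csharp
using System;
using System.Collections.Generic;
using NRedisStack.DataTypes;
using NRedisStack.Literals.Enums;
using NRedisStack.RedisStackCommands;
using StackExchange.Redis;

class RangeSketch
{
    static void Main()
    {
        // Assumption: a local Redis Stack instance with the TimeSeries module loaded.
        var muxer = ConnectionMultiplexer.Connect("localhost:6379");
        IDatabase db = muxer.GetDatabase();

        const string key = "range:sketch"; // hypothetical key, mirrors the tests' single-series setup
        db.TS().Create(key, labels: new List<TimeSeriesLabel> { new TimeSeriesLabel("sensor", "demo") });

        // Ten samples, 50 ms apart, like the CreateData helpers in the tests.
        for (int i = 0; i < 10; i++)
            db.TS().Add(key, i * 50, i);

        // Raw range over the whole series ("-"/"+" are the open-ended bounds used above).
        var all = db.TS().Range(key, "-", "+");

        // Aggregated range: one COUNT bucket per 100 ms, aligned to the start of the range.
        var buckets = db.TS().Range(key, 0, 500, align: "-",
                                    aggregation: TsAggregation.Count, timeBucket: 100);

        // Server-side filtering by an explicit timestamp list and by a value interval.
        var filtered = db.TS().Range(key, "-", "+",
                                     filterByTs: new List<TimeStamp> { 0, 50, 100 },
                                     filterByValue: (0, 2));

        Console.WriteLine($"{all.Count} samples, {buckets.Count} buckets, {filtered.Count} filtered");
    }
}
```

RevRange/MRevRange accept the same options and only reverse the returned order, which is what the ReverseData comparisons in the tests assert.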
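On the same assumptions (an already connected `IDatabase`, illustrative key names), a short sketch of the compaction-rule flow that TestRules/TestRulesAsync cover:

```csharp
using System;
using NRedisStack.DataTypes;
using NRedisStack.Literals.Enums;
using NRedisStack.RedisStackCommands;
using StackExchange.Redis;

static class RulesSketch
{
    public static void Demo(IDatabase db)
    {
        const string srcKey = "rules:src";     // hypothetical raw series
        const string avgKey = "rules:src:avg"; // hypothetical downsampled series

        db.TS().Create(srcKey);
        db.TS().Create(avgKey);

        // Compact srcKey into avgKey with 50 ms AVG buckets.
        var rule = new TimeSeriesRule(avgKey, 50, TsAggregation.Avg);
        db.TS().CreateRule(srcKey, rule);

        // TS.INFO on the source lists the rule; deleting it removes the entry again.
        TimeSeriesInformation info = db.TS().Info(srcKey);
        Console.WriteLine(info.Rules.Count); // 1 while the rule exists

        db.TS().DeleteRule(srcKey, rule.DestKey);
    }
}
```

Both CreateRule and DeleteRule surface a server-side "TSDB: the key does not exist" error when the source series is missing, which is exactly what the non-existing-source tests pin down.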