From 9e0548432b2b82c361d2adfa84e259c89a7265b0 Mon Sep 17 00:00:00 2001
From: Joe DeVivo
Date: Wed, 9 Oct 2013 14:33:03 -0700
Subject: [PATCH 1/2] moved in bitcask schema bits from riak.schema

---
 priv/bitcask.schema           | 495 ++++++++++++++++++++++++++++++++++
 rebar.config                  |   3 +-
 test/bitcask_schema_tests.erl | 120 +++++++++
 3 files changed, 617 insertions(+), 1 deletion(-)
 create mode 100644 priv/bitcask.schema
 create mode 100644 test/bitcask_schema_tests.erl

diff --git a/priv/bitcask.schema b/priv/bitcask.schema
new file mode 100644
index 00000000..8534dd82
--- /dev/null
+++ b/priv/bitcask.schema
@@ -0,0 +1,495 @@
+%%%% bitcask
+
+%% @doc bitcask data root
+{mapping, "bitcask.data_root", "bitcask.data_root", [
+  {default, "{{platform_data_dir}}/bitcask"}
+]}.
+
+%% @doc The open_timeout setting specifies the maximum time Bitcask will
+%% block on startup while attempting to create or open the data directory.
+%% The value is in seconds and the default is 4. You generally need not
+%% change this value. If for some reason the timeout is exceeded on open
+%% you'll see a log message of the form:
+%% "Failed to start bitcask backend: .... "
+%% Only then should you consider a longer timeout.
+{mapping, "bitcask.open_timeout", "bitcask.open_timeout", [
+  {default, 4},
+  {datatype, integer},
+  {level, advanced}
+]}.
+
+%% @doc The `sync_strategy` setting changes the durability of writes by specifying
+%% when to synchronize data to disk. The default setting protects against data
+%% loss in the event of application failure (process death) but leaves open a
+%% small window wherein data could be lost in the event of complete system
+%% failure (e.g. hardware, O/S, power).
+%%
+%% The default mode, `none`, writes data into operating system buffers which
+%% will be written to disk when those buffers are flushed by the
+%% operating system. If the system fails (power loss, crash, etc.) before
+%% those buffers are flushed to stable storage, that data is lost.
+%%
+%% This is prevented by the setting `o_sync`, which forces the operating system
+%% to flush to stable storage at every write. The effect of flushing each
+%% write is better durability; however, write throughput will suffer as each
+%% write must wait for the sync to complete.
+%%
+%% ___Available Sync Strategies___
+%%
+%% * `none` - (default) Lets the operating system manage syncing writes.
+%% * `o_sync` - Uses the O_SYNC flag which forces syncs on every write.
+%% * `interval` - Riak will force Bitcask to sync every `bitcask.sync_interval` seconds.
+{mapping, "bitcask.sync_strategy", "bitcask.sync_strategy", [
+  {default, none},
+  {datatype, {enum, [none, o_sync, interval]}},
+  {level, advanced}
+]}.
+
+{mapping, "bitcask.sync_interval", "bitcask.sync_strategy", [
+  {datatype, {duration, s}},
+  {level, advanced}
+]}.
+
+{translation,
+ "bitcask.sync_strategy",
+ fun(Conf) ->
+    Setting = cuttlefish_util:conf_get_value("bitcask.sync_strategy", Conf),
+    case Setting of
+        none -> none;
+        o_sync -> o_sync;
+        interval ->
+            Interval = cuttlefish_util:conf_get_value("bitcask.sync_interval", Conf, undefined),
+            {seconds, Interval};
+        _Default -> none
+    end
+ end}.
+
+%% @doc The `max_file_size` setting describes the maximum permitted size for any
+%% single data file in the Bitcask directory. If a write causes the current
+%% file to exceed this size threshold then that file is closed, and a new file
+%% is opened for writes.
+{mapping, "bitcask.max_file_size", "bitcask.max_file_size", [ + {default, "2GB"}, + {datatype, bytesize}, + {level, advanced} +]}. + + +%% @doc The `merge_window` setting lets you specify when during the day merge +%% operations are allowed to be triggered. Valid options are: +%% +%% * `always` (default) No restrictions +%% * `never` Merge will never be attempted +%% * `window` Hours during which merging is permitted, where +%% `bitcask.merge_window.start` and +%% `bitcask.merge_window.end` are integers between 0 and 23. +%% +%% If merging has a significant impact on performance of your cluster, or your +%% cluster has quiet periods in which little storage activity occurs, you may +%% want to change this setting from the default. +{mapping, "bitcask.merge_window", "bitcask.merge_window", [ + {default, always}, + {datatype, {enum, [always, never, window]}}, + {level, advanced} +]}. + +{mapping, "bitcask.merge_window.start", "bitcask.merge_window", [ + {default, 0}, + {datatype, integer}, + {level, advanced} +]}. + +{mapping, "bitcask.merge_window.end", "bitcask.merge_window", [ + {default, 23}, + {datatype, integer}, + {level, advanced} +]}. + + +{translation, + "bitcask.merge_window", + fun(Conf) -> + Setting = cuttlefish_util:conf_get_value("bitcask.merge_window", Conf), + case Setting of + always -> always; + never -> never; + window -> + Start = cuttlefish_util:conf_get_value("bitcask.merge_window.start", Conf, undefined), + End = cuttlefish_util:conf_get_value("bitcask.merge_window.end", Conf, undefined), + {Start, End}; + _Default -> always + end + end}. + +%% @doc `frag_merge_trigger` setting describes what ratio of +%% dead keys to total keys in a file will trigger merging. The value of this +%% setting is a percentage (0-100). For example, if a data file contains 6 +%% dead keys and 4 live keys, then merge will be triggered at the default +%% setting. Increasing this value will cause merging to occur less often, +%% whereas decreasing the value will cause merging to happen more often. +%% +%% Default is: `60` +{mapping, "bitcask.frag_merge_trigger", "bitcask.frag_merge_trigger", [ + {datatype, integer}, + {level, advanced}, + {default, 60} +]}. + + +%% @doc `dead_bytes_merge_trigger` setting describes how much +%% data stored for dead keys in a single file will trigger merging. The +%% value is in bytes. If a file meets or exceeds the trigger value for dead +%% bytes, merge will be triggered. Increasing the value will cause merging +%% to occur less often, whereas decreasing the value will cause merging to +%% happen more often. +%% +%% When either of these constraints are met by any file in the directory, +%% Bitcask will attempt to merge files. +%% +%% Default is: 512MB in bytes +{mapping, "bitcask.dead_bytes_merge_trigger", "bitcask.dead_bytes_merge_trigger", [ + {datatype, bytesize}, + {level, advanced}, + {default, "512MB"} +]}. + +%% @doc `frag_threshold` setting describes what ratio of +%% dead keys to total keys in a file will cause it to be included in the +%% merge. The value of this setting is a percentage (0-100). For example, +%% if a data file contains 4 dead keys and 6 live keys, it will be included +%% in the merge at the default ratio. Increasing the value will cause fewer +%% files to be merged, decreasing the value will cause more files to be +%% merged. +%% +%% Default is: `40` +{mapping, "bitcask.frag_threshold", "bitcask.frag_threshold", [ + {datatype, integer}, + {level, advanced}, + {default, 40} +]}. 
+ +%% @doc `dead_bytes_threshold` setting describes the minimum +%% amount of data occupied by dead keys in a file to cause it to be included +%% in the merge. Increasing the value will cause fewer files to be merged, +%% decreasing the value will cause more files to be merged. +%% +%% Default is: 128MB in bytes +{mapping, "bitcask.dead_bytes_threshold", "bitcask.dead_bytes_threshold", [ + {datatype, bytesize}, + {level, advanced}, + {default, "128MB"} +]}. + +%% @doc `small_file_threshold` setting describes the minimum +%% size a file must have to be _excluded_ from the merge. Files smaller +%% than the threshold will be included. Increasing the value will cause +%% _more_ files to be merged, decreasing the value will cause _fewer_ files +%% to be merged. +%% +%% Default is: 10MB in bytes +{mapping, "bitcask.small_file_threshold", "bitcask.small_file_threshold", [ + {datatype, bytesize}, + {level, advanced}, + {default, "10MB"} +]}. + +%% @doc Fold keys thresholds will reuse the keydir if another fold was started less +%% than `max_fold_age` ago and there were less than `max_fold_puts` updates. +%% Otherwise it will wait until all current fold keys complete and then start. +%% Set either option to -1 to disable. +%% Age in micro seconds (-1 means "unlimited") +{mapping, "bitcask.max_fold_age", "bitcask.max_fold_age", [ + {datatype, integer}, + {level, advanced}, + {default, -1} +]}. + +{mapping, "bitcask.max_fold_puts", "bitcask.max_fold_puts", [ + {datatype, integer}, + {level, advanced}, + {default, 0} +]}. + +%% @doc By default, Bitcask keeps all of your data around. If your data has +%% limited time-value, or if for space reasons you need to purge data, you can +%% set the `expiry_secs` option. If you needed to purge data automatically +%% after 1 day, set the value to `1d`. +%% +%% Default is: `-1` which disables automatic expiration +{mapping, "bitcask.expiry", "bitcask.expiry_secs", [ + {datatype, {duration, s}}, + {level, advanced}, + {default, -1} +]}. + + +%% @doc Require the CRC to be present at the end of hintfiles. +%% Bitcask defaults to a backward compatible mode where +%% old hint files will still be accepted without them. +%% It is safe to set this true for new deployments and will +%% become the default setting in a future release. +{mapping, "bitcask.require_hint_crc", "bitcask.require_hint_crc", [ + {default, true}, + {datatype, {enum, [true, false]}}, + {level, advanced} +]}. + +%% By default, Bitcask will trigger a merge whenever a data file contains +%% an expired key. This may result in excessive merging under some usage +%% patterns. To prevent this you can set the `expiry_grace_time` option. +%% Bitcask will defer triggering a merge solely for key expiry by the +%% configured number of seconds. Setting this to `1h` effectively limits +%% each cask to merging for expiry once per hour. +%% +%% Default is: `0` +{mapping, "bitcask.expiry_grace_time", "bitcask.expiry_grace_time", [ + {datatype, {duration, s}}, + {level, advanced}, + {default, 0} +]}. + +%% @doc Configure how Bitcask writes data to disk. +%% erlang: Erlang's built-in file API +%% nif: Direct calls to the POSIX C API +%% +%% The NIF mode provides higher throughput for certain +%% workloads, but has the potential to negatively impact +%% the Erlang VM, leading to higher worst-case latencies +%% and possible throughput collapse. +{mapping, "bitcask.io_mode", "bitcask.io_mode", [ + {default, erlang}, + {datatype, {enum, [erlang, nif]}} +]}. 
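+
+%% For illustration, a riak.conf using the settings above might contain lines
+%% such as (the values here are only examples):
+%%   bitcask.data_root = /var/lib/riak/bitcask
+%%   bitcask.sync_strategy = interval
+%%   bitcask.sync_interval = 30s
+%%   bitcask.merge_window = window
+%%   bitcask.merge_window.start = 1
+%%   bitcask.merge_window.end = 5
+%% which the translations above turn into `{sync_strategy, {seconds, 30}}` and
+%% `{merge_window, {1, 5}}` in the generated bitcask application environment.
+%%
+%% The `multi_backend.$name.bitcask.*` mappings below mirror these settings for
+%% riak_kv's multi backend, with `$name` standing in for the configured backend name.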
+
+%% @doc bitcask data root
+{mapping, "multi_backend.$name.bitcask.data_root", "riak_kv.multi_backend", [
+  {level, advanced}
+]}.
+
+
+%% @doc The open_timeout setting specifies the maximum time Bitcask will
+%% block on startup while attempting to create or open the data directory.
+%% The value is in seconds and the default is 4. You generally need not
+%% change this value. If for some reason the timeout is exceeded on open
+%% you'll see a log message of the form:
+%% "Failed to start bitcask backend: .... "
+%% Only then should you consider a longer timeout.
+{mapping, "multi_backend.$name.bitcask.open_timeout", "riak_kv.multi_backend", [
+  {default, 4},
+  {datatype, integer},
+  {level, advanced}
+]}.
+
+%% @doc The `sync_strategy` setting changes the durability of writes by specifying
+%% when to synchronize data to disk. The default setting protects against data
+%% loss in the event of application failure (process death) but leaves open a
+%% small window wherein data could be lost in the event of complete system
+%% failure (e.g. hardware, O/S, power).
+%%
+%% The default mode, `none`, writes data into operating system buffers which
+%% will be written to disk when those buffers are flushed by the
+%% operating system. If the system fails (power loss, crash, etc.) before
+%% those buffers are flushed to stable storage, that data is lost.
+%%
+%% This is prevented by the setting `o_sync`, which forces the operating system
+%% to flush to stable storage at every write. The effect of flushing each
+%% write is better durability; however, write throughput will suffer as each
+%% write must wait for the sync to complete.
+%%
+%% ___Available Sync Strategies___
+%%
+%% * `none` - (default) Lets the operating system manage syncing writes.
+%% * `o_sync` - Uses the O_SYNC flag which forces syncs on every write.
+%% * `interval` - Riak will force Bitcask to sync every `bitcask.sync_interval` seconds.
+{mapping, "multi_backend.$name.bitcask.sync_strategy", "riak_kv.multi_backend", [
+  {default, none},
+  {datatype, {enum, [none, o_sync, interval]}},
+  {level, advanced}
+]}.
+
+{mapping, "multi_backend.$name.bitcask.sync_interval", "riak_kv.multi_backend", [
+  {datatype, {duration, s}},
+  {level, advanced}
+]}.
+
+%% @doc The `max_file_size` setting describes the maximum permitted size for any
+%% single data file in the Bitcask directory. If a write causes the current
+%% file to exceed this size threshold then that file is closed, and a new file
+%% is opened for writes.
+{mapping, "multi_backend.$name.bitcask.max_file_size", "riak_kv.multi_backend", [
+  {default, "2GB"},
+  {datatype, bytesize},
+  {level, advanced}
+]}.
+
+
+%% @doc The `merge_window` setting lets you specify when during the day merge
+%% operations are allowed to be triggered. Valid options are:
+%%
+%% * `always` (default) No restrictions
+%% * `never` Merge will never be attempted
+%% * `window` Hours during which merging is permitted, where
+%%   `bitcask.merge_window.start` and
+%%   `bitcask.merge_window.end` are integers between 0 and 23.
+%%
+%% If merging has a significant impact on performance of your cluster, or your
+%% cluster has quiet periods in which little storage activity occurs, you may
+%% want to change this setting from the default.
+{mapping, "multi_backend.$name.bitcask.merge_window", "riak_kv.multi_backend", [
+  {default, always},
+  {datatype, {enum, [always, never, window]}},
+  {level, advanced}
+]}.
+ +{mapping, "multi_backend.$name.bitcask.merge_window.start", "riak_kv.multi_backend", [ + {default, 0}, + {datatype, integer}, + {level, advanced} +]}. + +{mapping, "multi_backend.$name.bitcask.merge_window.end", "riak_kv.multi_backend", [ + {default, 23}, + {datatype, integer}, + {level, advanced} +]}. + +%% @doc `frag_merge_trigger` setting describes what ratio of +%% dead keys to total keys in a file will trigger merging. The value of this +%% setting is a percentage (0-100). For example, if a data file contains 6 +%% dead keys and 4 live keys, then merge will be triggered at the default +%% setting. Increasing this value will cause merging to occur less often, +%% whereas decreasing the value will cause merging to happen more often. +%% +%% Default is: `60` +{mapping, "multi_backend.$name.bitcask.frag_merge_trigger", "riak_kv.multi_backend", [ + {datatype, integer}, + {level, advanced}, + {default, 60} +]}. + + +%% @doc `dead_bytes_merge_trigger` setting describes how much +%% data stored for dead keys in a single file will trigger merging. The +%% value is in bytes. If a file meets or exceeds the trigger value for dead +%% bytes, merge will be triggered. Increasing the value will cause merging +%% to occur less often, whereas decreasing the value will cause merging to +%% happen more often. +%% +%% When either of these constraints are met by any file in the directory, +%% Bitcask will attempt to merge files. +%% +%% Default is: 512mb in bytes +{mapping, "multi_backend.$name.bitcask.dead_bytes_merge_trigger", "riak_kv.multi_backend", [ + {datatype, bytesize}, + {level, advanced}, + {default, "512MB"} +]}. + +%% @doc `frag_threshold` setting describes what ratio of +%% dead keys to total keys in a file will cause it to be included in the +%% merge. The value of this setting is a percentage (0-100). For example, +%% if a data file contains 4 dead keys and 6 live keys, it will be included +%% in the merge at the default ratio. Increasing the value will cause fewer +%% files to be merged, decreasing the value will cause more files to be +%% merged. +%% +%% Default is: `40` +{mapping, "multi_backend.$name.bitcask.frag_threshold", "riak_kv.multi_backend", [ + {datatype, integer}, + {level, advanced}, + {default, 40} +]}. + +%% @doc `dead_bytes_threshold` setting describes the minimum +%% amount of data occupied by dead keys in a file to cause it to be included +%% in the merge. Increasing the value will cause fewer files to be merged, +%% decreasing the value will cause more files to be merged. +%% +%% Default is: 128mb in bytes +{mapping, "multi_backend.$name.bitcask.dead_bytes_threshold", "riak_kv.multi_backend", [ + {datatype, bytesize}, + {level, advanced}, + {default, "128MB"} +]}. + +%% @doc `small_file_threshold` setting describes the minimum +%% size a file must have to be _excluded_ from the merge. Files smaller +%% than the threshold will be included. Increasing the value will cause +%% _more_ files to be merged, decreasing the value will cause _fewer_ files +%% to be merged. +%% +%% Default is: 10mb in bytes +{mapping, "multi_backend.$name.bitcask.small_file_threshold", "riak_kv.multi_backend", [ + {datatype, bytesize}, + {level, advanced}, + {default, "10MB"} +]}. + +%% @doc Fold keys thresholds will reuse the keydir if another fold was started less +%% than `max_fold_age` ago and there were less than `max_fold_puts` updates. +%% Otherwise it will wait until all current fold keys complete and then start. +%% Set either option to -1 to disable. 
+%% Age in micro seconds (-1 means "unlimited") +{mapping, "multi_backend.$name.bitcask.max_fold_age", "riak_kv.multi_backend", [ + {datatype, integer}, + {level, advanced}, + {default, -1} +]}. + +{mapping, "multi_backend.$name.bitcask.max_fold_puts", "riak_kv.multi_backend", [ + {datatype, integer}, + {level, advanced}, + {default, 0} +]}. + +%% @doc By default, Bitcask keeps all of your data around. If your data has +%% limited time-value, or if for space reasons you need to purge data, you can +%% set the `expiry_secs` option. If you needed to purge data automatically +%% after 1 day, set the value to `1d`. +%% +%% Default is: `-1` which disables automatic expiration +{mapping, "multi_backend.$name.bitcask.expiry", "riak_kv.multi_backend", [ + {datatype, {duration, s}}, + {level, advanced}, + {default, -1} +]}. + + +%% @doc Require the CRC to be present at the end of hintfiles. +%% Bitcask defaults to a backward compatible mode where +%% old hint files will still be accepted without them. +%% It is safe to set this true for new deployments and will +%% become the default setting in a future release. +{mapping, "multi_backend.$name.bitcask.require_hint_crc", "riak_kv.multi_backend", [ + {default, true}, + {datatype, {enum, [true, false]}}, + {level, advanced} +]}. + +%% By default, Bitcask will trigger a merge whenever a data file contains +%% an expired key. This may result in excessive merging under some usage +%% patterns. To prevent this you can set the `expiry_grace_time` option. +%% Bitcask will defer triggering a merge solely for key expiry by the +%% configured number of seconds. Setting this to `1h` effectively limits +%% each cask to merging for expiry once per hour. +%% +%% Default is: `0` +{mapping, "multi_backend.$name.bitcask.expiry_grace_time", "riak_kv.multi_backend", [ + {datatype, {duration, s}}, + {level, advanced}, + {default, 0} +]}. + +%% @doc Configure how Bitcask writes data to disk. +%% erlang: Erlang's built-in file API +%% nif: Direct calls to the POSIX C API +%% +%% The NIF mode provides higher throughput for certain +%% workloads, but has the potential to negatively impact +%% the Erlang VM, leading to higher worst-case latencies +%% and possible throughput collapse. +{mapping, "multi_backend.$name.bitcask.io_mode", "riak_kv.multi_backend", [ + {default, erlang}, + {datatype, {enum, [erlang, nif]}}, + {level, advanced} +]}. diff --git a/rebar.config b/rebar.config index cef4c325..28f6606a 100644 --- a/rebar.config +++ b/rebar.config @@ -1,7 +1,8 @@ {port_specs, [{"priv/bitcask.so", ["c_src/*.c"]}]}. {deps, [ - {meck, "0.8.1", {git, "git://github.com/basho/meck.git", {tag, "0.8.1"}}} + {meck, "0.8.1", {git, "git://github.com/basho/meck.git", {tag, "0.8.1"}}}, + {cuttlefish, ".*", {git, "git://github.com/basho/cuttlefish.git", {branch, "develop"}}} ]}. {port_env, [ diff --git a/test/bitcask_schema_tests.erl b/test/bitcask_schema_tests.erl new file mode 100644 index 00000000..1cfc2e19 --- /dev/null +++ b/test/bitcask_schema_tests.erl @@ -0,0 +1,120 @@ +-module(bitcask_schema_tests). + +-include_lib("eunit/include/eunit.hrl"). +-compile(export_all). + +%% basic schema test will check to make sure that all defaults from the schema +%% make it into the generated app.config +basic_schema_test() -> + %% The defaults are defined in ../priv/bitcask.schema. it is the file under test. 
+ Config = cuttlefish_unit:generate_templated_config("../priv/bitcask.schema", [], context()), + + cuttlefish_unit:assert_config(Config, "bitcask.data_root", "./data/bitcask"), + cuttlefish_unit:assert_config(Config, "bitcask.open_timeout", 4), + cuttlefish_unit:assert_config(Config, "bitcask.sync_strategy", none), + cuttlefish_unit:assert_config(Config, "bitcask.max_file_size", 2147483648), + cuttlefish_unit:assert_config(Config, "bitcask.merge_window", always), + cuttlefish_unit:assert_config(Config, "bitcask.frag_merge_trigger", 60), + cuttlefish_unit:assert_config(Config, "bitcask.dead_bytes_merge_trigger", 536870912), + cuttlefish_unit:assert_config(Config, "bitcask.frag_threshold", 40), + cuttlefish_unit:assert_config(Config, "bitcask.dead_bytes_threshold", 134217728), + cuttlefish_unit:assert_config(Config, "bitcask.small_file_threshold", 10485760), + cuttlefish_unit:assert_config(Config, "bitcask.max_fold_age", -1), + cuttlefish_unit:assert_config(Config, "bitcask.max_fold_puts", 0), + cuttlefish_unit:assert_config(Config, "bitcask.expiry_secs", -1), + cuttlefish_unit:assert_config(Config, "bitcask.require_hint_crc", true), + cuttlefish_unit:assert_config(Config, "bitcask.expiry_grace_time", 0), + cuttlefish_unit:assert_config(Config, "bitcask.io_mode", erlang), + + %% Make sure no multi_backend + cuttlefish_unit:assert_config(Config, "riak_kv.multi_backend", undefined), + ok. + +merge_window_test() -> + Conf = [ + {["bitcask", "merge_window"], window}, + {["bitcask", "merge_window", "start"], 0}, + {["bitcask", "merge_window", "end"], 12} + ], + + %% The defaults are defined in ../priv/bitcask.schema. it is the file under test. + Config = cuttlefish_unit:generate_templated_config("../priv/bitcask.schema", Conf, context()), + + cuttlefish_unit:assert_config(Config, "bitcask.data_root", "./data/bitcask"), + cuttlefish_unit:assert_config(Config, "bitcask.open_timeout", 4), + cuttlefish_unit:assert_config(Config, "bitcask.sync_strategy", none), + cuttlefish_unit:assert_config(Config, "bitcask.max_file_size", 2147483648), + cuttlefish_unit:assert_config(Config, "bitcask.merge_window", {0, 12}), + cuttlefish_unit:assert_config(Config, "bitcask.frag_merge_trigger", 60), + cuttlefish_unit:assert_config(Config, "bitcask.dead_bytes_merge_trigger", 536870912), + cuttlefish_unit:assert_config(Config, "bitcask.frag_threshold", 40), + cuttlefish_unit:assert_config(Config, "bitcask.dead_bytes_threshold", 134217728), + cuttlefish_unit:assert_config(Config, "bitcask.small_file_threshold", 10485760), + cuttlefish_unit:assert_config(Config, "bitcask.max_fold_age", -1), + cuttlefish_unit:assert_config(Config, "bitcask.max_fold_puts", 0), + cuttlefish_unit:assert_config(Config, "bitcask.expiry_secs", -1), + cuttlefish_unit:assert_config(Config, "bitcask.require_hint_crc", true), + cuttlefish_unit:assert_config(Config, "bitcask.expiry_grace_time", 0), + cuttlefish_unit:assert_config(Config, "bitcask.io_mode", erlang), + + %% Make sure no multi_backend + cuttlefish_unit:assert_config(Config, "riak_kv.multi_backend", undefined), + ok. + +override_schema_test() -> + %% Conf represents the riak.conf file that would be read in by cuttlefish. 
+    %% This proplist is what would be output by the conf_parse module.
+    Conf = [
+        {["bitcask", "data_root"], "/absolute/data/bitcask"},
+        {["bitcask", "open_timeout"], 2},
+        {["bitcask", "sync_strategy"], interval},
+        {["bitcask", "sync_interval"], "10s"},
+        {["bitcask", "max_file_size"], "4GB"},
+        {["bitcask", "merge_window"], never},
+        {["bitcask", "merge_window", "start"], 0},
+        {["bitcask", "merge_window", "end"], 12},
+        {["bitcask", "frag_merge_trigger"], 20},
+        {["bitcask", "dead_bytes_merge_trigger"], "256MB"},
+        {["bitcask", "frag_threshold"], 10},
+        {["bitcask", "dead_bytes_threshold"], "64MB"},
+        {["bitcask", "small_file_threshold"], "5MB"},
+        {["bitcask", "max_fold_age"], 12},
+        {["bitcask", "max_fold_puts"], 7},
+        {["bitcask", "expiry"], "20s" },
+        {["bitcask", "require_hint_crc"], false },
+        {["bitcask", "expiry_grace_time"], "15s" },
+        {["bitcask", "io_mode"], nif}
+    ],
+
+    %% The defaults are defined in ../priv/bitcask.schema. It is the file under test.
+    Config = cuttlefish_unit:generate_templated_config("../priv/bitcask.schema", Conf, context()),
+
+    cuttlefish_unit:assert_config(Config, "bitcask.data_root", "/absolute/data/bitcask"),
+    cuttlefish_unit:assert_config(Config, "bitcask.open_timeout", 2),
+    cuttlefish_unit:assert_config(Config, "bitcask.sync_strategy", {seconds, 10}),
+    cuttlefish_unit:assert_config(Config, "bitcask.max_file_size", 4294967296),
+    cuttlefish_unit:assert_config(Config, "bitcask.merge_window", never),
+    cuttlefish_unit:assert_config(Config, "bitcask.frag_merge_trigger", 20),
+    cuttlefish_unit:assert_config(Config, "bitcask.dead_bytes_merge_trigger", 268435456),
+    cuttlefish_unit:assert_config(Config, "bitcask.frag_threshold", 10),
+    cuttlefish_unit:assert_config(Config, "bitcask.dead_bytes_threshold", 67108864),
+    cuttlefish_unit:assert_config(Config, "bitcask.small_file_threshold", 5242880),
+    cuttlefish_unit:assert_config(Config, "bitcask.max_fold_age", 12),
+    cuttlefish_unit:assert_config(Config, "bitcask.max_fold_puts", 7),
+    cuttlefish_unit:assert_config(Config, "bitcask.expiry_secs", 20),
+    cuttlefish_unit:assert_config(Config, "bitcask.require_hint_crc", false),
+    cuttlefish_unit:assert_config(Config, "bitcask.expiry_grace_time", 15),
+    cuttlefish_unit:assert_config(Config, "bitcask.io_mode", nif),
+
+    %% Make sure no multi_backend
+    cuttlefish_unit:assert_config(Config, "riak_kv.multi_backend", undefined),
+    ok.
+
+%% This context() represents the substitution variables that rebar will use during the build process.
+%% The bitcask schema file is written with some {{mustache_vars}} for substitution during packaging.
+%% Cuttlefish cannot parse those directly, so we perform the substitutions first, because
+%% that's how it would work in real life.
+context() ->
+    [
+        {platform_data_dir, "./data"}
+    ].

From 36e91bb27a9b358d3209d14e2ac8c122ee84b183 Mon Sep 17 00:00:00 2001
From: Joe DeVivo
Date: Tue, 15 Oct 2013 12:37:04 -0700
Subject: [PATCH 2/2] reworded comment for require_crc_hint in schema

---
 priv/bitcask.schema | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/priv/bitcask.schema b/priv/bitcask.schema
index 8534dd82..514e645c 100644
--- a/priv/bitcask.schema
+++ b/priv/bitcask.schema
@@ -224,10 +224,8 @@
 %% @doc Require the CRC to be present at the end of hintfiles.
-%% Bitcask defaults to a backward compatible mode where
-%% old hint files will still be accepted without them.
-%% It is safe to set this true for new deployments and will
-%% become the default setting in a future release.
+%% Setting this to false runs Bitcask in a backward compatible mode +%% where old hint files will still be accepted without CRC signatures. {mapping, "bitcask.require_hint_crc", "bitcask.require_hint_crc", [ {default, true}, {datatype, {enum, [true, false]}},