This file is indexed.

/usr/lib/erlang/lib/bitcask-2.0.6/priv/bitcask.schema is in erlang-bitcask 2.0.6+dfsg-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
%% -*- erlang -*-

%%%% bitcask

%% @doc A path under which bitcask data files will be stored.
%% The default uses cuttlefish's $(platform_data_dir) substitution
%% variable, so data lives under the node's platform data directory.
{mapping, "bitcask.data_root", "bitcask.data_root", [
  {default, "$(platform_data_dir)/bitcask"},
  {datatype, directory}
]}.

%% @doc Specifies the maximum time Bitcask will block on startup while
%% attempting to create or open the data directory. You generally need
%% not change this value. If for some reason the timeout is exceeded
%% on open you'll see a log message of the form: "Failed to start
%% bitcask backend: .... " Only then should you consider a longer
%% timeout.
%%
%% Accepts a duration string (e.g. "4s"), parsed into whole seconds.
{mapping, "bitcask.open_timeout", "bitcask.open_timeout", [
  {default, "4s"},
  {datatype, {duration, s}},
  hidden
]}.

%% @doc Changes the durability of writes by specifying when to
%% synchronize data to disk. The default setting protects against data
%% loss in the event of application failure (process death) but leaves
%% open a small window wherein data could be lost in the event of
%% complete system failure (e.g. hardware, O/S, power).
%%
%% The default mode, `none`, writes data into operating system buffers
%% which will be written to the disks when those buffers are
%% flushed by the operating system. If the system fails (power loss,
%% crash, etc.) before those buffers are flushed to stable
%% storage that data is lost.
%%
%% This is prevented by the setting `o_sync` which forces the
%% operating system to flush to stable storage at every write. The
%% effect of flushing each write is better durability, however write
%% throughput will suffer as each write will have to wait for the
%% write to complete.
%%
%% Available Sync Strategies:
%%
%% * `none` - (default) Lets the operating system manage syncing
%%   writes.
%% * `o_sync` - Uses the O_SYNC flag which forces syncs on every
%%   write.
%% * `interval` - Riak will force Bitcask to sync every
%%   `bitcask.sync.interval` seconds.
{mapping, "bitcask.sync.strategy", "bitcask.sync_strategy", [
  {default, none},
  {datatype, {enum, [none, o_sync, interval]}},
  hidden
]}.

%% @see bitcask.sync.strategy
%% Sync period, only meaningful when bitcask.sync.strategy is
%% `interval`.  Note it maps onto the same bitcask.sync_strategy app
%% key; the accompanying translation folds it into {seconds, N}.
{mapping, "bitcask.sync.interval", "bitcask.sync_strategy", [
  {datatype, {duration, s}},
  hidden
]}.

%% Fold bitcask.sync.strategy and bitcask.sync.interval into the single
%% bitcask.sync_strategy app.config value.  An `interval` strategy
%% becomes {seconds, N}; anything unrecognized falls back to `none`.
{translation,
 "bitcask.sync_strategy",
 fun(Conf) ->
     case cuttlefish:conf_get("bitcask.sync.strategy", Conf) of
         interval ->
             Secs = cuttlefish:conf_get("bitcask.sync.interval", Conf, undefined),
             {seconds, Secs};
         o_sync ->
             o_sync;
         _NoneOrUnknown ->
             %% `none` and any unexpected value both defer syncing to the OS.
             none
     end
 end}.

%% @doc Describes the maximum permitted size for any single data file
%% in the Bitcask directory. If a write causes the current file to
%% exceed this size threshold then that file is closed, and a new file
%% is opened for writes.
%%
%% Accepts a byte size such as "2GB".
{mapping, "bitcask.max_file_size", "bitcask.max_file_size", [
  {default, "2GB"},
  {datatype, bytesize},
  hidden
]}.

%% @doc Lets you specify when during the day merge operations are
%% allowed to be triggered. Valid options are:
%%
%% * `always` (default) No restrictions
%% * `never` Merge will never be attempted
%% * `window` Hours during which merging is permitted, where
%%   `bitcask.merge.window.start` and `bitcask.merge.window.end` are
%%   integers between 0 and 23.
%%
%% If merging has a significant impact on performance of your cluster,
%% or your cluster has quiet periods in which little storage activity
%% occurs, you may want to change this setting from the default.
%%
%% The policy and window bounds are folded into the single
%% bitcask.merge_window app key by the accompanying translation.
{mapping, "bitcask.merge.policy", "bitcask.merge_window", [
  {default, always},
  {datatype, {enum, [always, never, window]}},
  hidden
]}.

%% @see bitcask.merge.policy
%% Start hour (0-23) of the permitted merge window; only consulted
%% when bitcask.merge.policy is `window`.
{mapping, "bitcask.merge.window.start", "bitcask.merge_window", [
  {default, 0},
  {datatype, integer},
  hidden
]}.

%% @see bitcask.merge.policy
%% End hour (0-23) of the permitted merge window; only consulted
%% when bitcask.merge.policy is `window`.
{mapping, "bitcask.merge.window.end", "bitcask.merge_window", [
  {default, 23},
  {datatype, integer},
  hidden
]}.

%% Fold the merge policy and window bounds into the single
%% bitcask.merge_window app.config value: `window` becomes a
%% {StartHour, EndHour} tuple; anything unrecognized falls back to
%% `always`.
{translation,
 "bitcask.merge_window",
 fun(Conf) ->
     case cuttlefish:conf_get("bitcask.merge.policy", Conf) of
         window ->
             Lo = cuttlefish:conf_get("bitcask.merge.window.start", Conf, undefined),
             Hi = cuttlefish:conf_get("bitcask.merge.window.end", Conf, undefined),
             {Lo, Hi};
         never ->
             never;
         _AlwaysOrUnknown ->
             %% `always` and any unexpected value both permit merging at any hour.
             always
     end
 end}.

%% @doc Describes what ratio of dead keys to total keys in a file will
%% trigger merging. The value of this setting is a percentage
%% (0-100). For example, if a data file contains 6 dead keys and 4
%% live keys, then merge will be triggered at the default
%% setting. Increasing this value will cause merging to occur less
%% often, whereas decreasing the value will cause merging to happen
%% more often.
%%
%% Default is: `60`
%% (Range is enforced by the shared "is_percentage" validator.)
{mapping, "bitcask.merge.triggers.fragmentation",
 "bitcask.frag_merge_trigger",
 [
  {datatype, integer},
  hidden,
  {default, 60},
  {validators, ["is_percentage"]}
]}.

%% Shared validator: accepts any value in the inclusive range 0..100.
%% Referenced by the fragmentation trigger and threshold mappings.
{validator,
 "is_percentage",
 "must be a percentage",
 fun(Pct) -> (Pct >= 0) andalso (Pct =< 100) end}.

%% @doc Describes how much data stored for dead keys in a single file
%% will trigger merging. The value is in bytes. If a file meets or
%% exceeds the trigger value for dead bytes, merge will be
%% triggered. Increasing the value will cause merging to occur less
%% often, whereas decreasing the value will cause merging to happen
%% more often.
%%
%% When either of these constraints are met by any file in the
%% directory, Bitcask will attempt to merge files.
%%
%% Default is: 512MB
%% Accepts a byte size such as "512MB".
{mapping,
 "bitcask.merge.triggers.dead_bytes",
 "bitcask.dead_bytes_merge_trigger", [
  {datatype, bytesize},
  hidden,
  {default, "512MB"}
]}.


%% @doc Describes what ratio of dead keys to total keys in a file will
%% cause it to be included in the merge. The value of this setting is
%% a percentage (0-100). For example, if a data file contains 4 dead
%% keys and 6 live keys, it will be included in the merge at the
%% default ratio. Increasing the value will cause fewer files to be
%% merged, decreasing the value will cause more files to be merged.
%%
%% Default is: `40`
{mapping,
 "bitcask.merge.thresholds.fragmentation",
 "bitcask.frag_threshold", [
  {datatype, integer},
  hidden,
  {default, 40},
  {validators, ["is_percentage"]}
]}.

%% @doc Describes the minimum amount of data occupied by dead keys in
%% a file to cause it to be included in the merge. Increasing the
%% value will cause fewer files to be merged, decreasing the value
%% will cause more files to be merged.
%%
%% Default is: 128MB
%% Accepts a byte size such as "128MB".
{mapping,
 "bitcask.merge.thresholds.dead_bytes",
 "bitcask.dead_bytes_threshold", [
  {datatype, bytesize},
  hidden,
  {default, "128MB"}
]}.

%% @doc Describes the minimum size a file must have to be _excluded_
%% from the merge. Files smaller than the threshold will be
%% included. Increasing the value will cause _more_ files to be
%% merged, decreasing the value will cause _fewer_ files to be merged.
%%
%% Default is: 10MB
%% Accepts a byte size such as "10MB".
{mapping,
 "bitcask.merge.thresholds.small_file",
 "bitcask.small_file_threshold", [
  {datatype, bytesize},
  hidden,
  {default, "10MB"}
]}.

%% @doc Fold keys thresholds will reuse the keydir if another fold was
%% started less than `fold.max_age` ago and there were less than
%% `fold.max_puts` updates.  Otherwise it will wait until all current
%% fold keys complete and then start.  Set either option to unlimited
%% to disable.
%%
%% Durations are parsed in milliseconds; the accompanying translation
%% converts the value to the microseconds bitcask expects.
{mapping, "bitcask.fold.max_age", "bitcask.max_fold_age", [
  {datatype, [{atom, unlimited}, {duration, ms}]},
  hidden,
  {default, unlimited}
]}.

%% Convert bitcask.fold.max_age (milliseconds, or `unlimited`) into
%% the microsecond value bitcask expects; -1 disables the age limit.
{translation, "bitcask.max_fold_age",
 fun(Conf) ->
     MaxAge = cuttlefish:conf_get("bitcask.fold.max_age", Conf),
     if
         is_integer(MaxAge) ->
             %% Parsed duration is milliseconds; app.config wants microseconds.
             MaxAge * 1000;
         true ->
             %% `unlimited` — or anything unexpected, for safety — disables it.
             -1
     end
 end
}.

%% @see bitcask.fold.max_age
%% Maximum number of updates tolerated before keydir reuse is refused;
%% the accompanying translation maps `unlimited` to -1 for app.config.
{mapping, "bitcask.fold.max_puts", "bitcask.max_fold_puts", [
  {datatype, [integer, {atom, unlimited}]},
  hidden,
  {default, 0}
]}.

%% Map bitcask.fold.max_puts to the integer app.config value:
%% `unlimited` becomes -1; a plain integer passes through unchanged.
{translation, "bitcask.max_fold_puts",
 fun(Conf) ->
     case cuttlefish:conf_get("bitcask.fold.max_puts", Conf) of
         MaxPuts when is_integer(MaxPuts) -> MaxPuts;
         unlimited -> -1;
         _Unexpected -> 0 %% fall back to the mapping default
     end
 end
}.

%% @doc By default, Bitcask keeps all of your data around. If your
%% data has limited time-value, or if for space reasons you need to
%% purge data, you can set the `expiry` option. If you needed to
%% purge data automatically after 1 day, set the value to `1d`.
%%
%% Default is: `off` which disables automatic expiration
%%
%% The accompanying translation turns `off` into -1 for app.config.
{mapping, "bitcask.expiry", "bitcask.expiry_secs", [
  {datatype, [{atom, off}, {duration, s}]},
  hidden,
  {default, off}
]}.

%% Map bitcask.expiry to a whole number of seconds for app.config;
%% `off` (or anything unexpected) becomes -1, disabling expiry.
{translation, "bitcask.expiry_secs",
 fun(Conf) ->
     case cuttlefish:conf_get("bitcask.expiry", Conf) of
         Secs when is_integer(Secs) -> Secs;
         _OffOrUnknown -> -1
     end
 end
}.

%% @doc Require the CRC to be present at the end of hintfiles.
%% Setting this to `allow_missing` runs Bitcask in a backward
%% compatible mode where old hint files will still be accepted without
%% CRC signatures.
%%
%% The accompanying translation maps `strict` to true and
%% `allow_missing` to false for bitcask's require_hint_crc setting.
{mapping, "bitcask.hintfile_checksums", "bitcask.require_hint_crc", [
  {default, strict},
  {datatype, {enum, [strict, allow_missing]}},
  hidden
]}.

%% Translate the hintfile_checksums enum into the boolean
%% require_hint_crc flag: only an explicit `allow_missing` relaxes
%% CRC enforcement; `strict` and anything unexpected keep it on.
{translation, "bitcask.require_hint_crc",
 fun(Conf) ->
     case cuttlefish:conf_get("bitcask.hintfile_checksums", Conf) of
         allow_missing -> false;
         _StrictOrOther -> true
     end
 end}.

%% @doc By default, Bitcask will trigger a merge whenever a data file
%% contains an expired key. This may result in excessive merging under
%% some usage patterns. To prevent this you can set the
%% `bitcask.expiry.grace_time` option.  Bitcask will defer triggering
%% a merge solely for key expiry by the configured number of
%% seconds. Setting this to `1h` effectively limits each cask to
%% merging for expiry once per hour.
%%
%% Default is: `0`
%% Accepts a duration such as "1h"; parsed into whole seconds.
{mapping, "bitcask.expiry.grace_time", "bitcask.expiry_grace_time", [
  {datatype, {duration, s}},
  hidden,
  {default, 0}
]}.

%% @doc Configure how Bitcask writes data to disk.
%%   erlang: Erlang's built-in file API
%%      nif: Direct calls to the POSIX C API
%%
%% The NIF mode provides higher throughput for certain
%% workloads, but has the potential to negatively impact
%% the Erlang VM, leading to higher worst-case latencies
%% and possible throughput collapse.
%%
%% Default is: `erlang`
{mapping, "bitcask.io_mode", "bitcask.io_mode", [
  {default, erlang},
  {datatype, {enum, [erlang, nif]}}
]}.