This file is indexed.

/etc/couchdb/default.ini is part of the couchdb-bin package (version 1.5.0-0ubuntu1).

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

; etc/couchdb/default.ini.tpl.  Generated from default.ini.tpl.in by configure.

; Upgrading CouchDB will overwrite this file.
[vendor]
name = Ubuntu
version = 14.04

[couchdb]
database_dir = /var/lib/couchdb
view_index_dir = /var/lib/couchdb
util_driver_dir = /usr/lib/x86_64-linux-gnu/couchdb/erlang/lib/couch-1.5.0/priv/lib
max_document_size = 4294967296 ; 4 GB
os_process_timeout = 5000 ; 5 seconds. for view and external servers.
max_dbs_open = 100
delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
uri_file = /var/run/couchdb/couch.uri
; Method used to compress everything that is appended to database and view index files, except
; for attachments (see the attachments section). Available methods are:
;
; none         - no compression
; snappy       - use google snappy, a very fast compressor/decompressor
; deflate_[N]  - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
;                lowest compression ratio) to 9 (slowest, highest compression ratio)
file_compression = snappy
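; For example, to favor smaller files over write speed, a deflate level could
; be used instead (an illustrative override; snappy is the shipped default):
;file_compression = deflate_6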
; Higher values may give better read performance due to fewer read operations
; and/or more OS page cache hits, but they can also increase overall response
; time for writes when there are many attachment write requests in parallel.
attachment_stream_buffer_size = 4096

plugin_dir = /usr/lib/x86_64-linux-gnu/couchdb/plugins

[database_compaction]
; larger buffer sizes can yield smaller files
doc_buffer_size = 524288 ; value in bytes
checkpoint_after = 5242880 ; checkpoint after every N bytes were written

[view_compaction]
; larger buffer sizes can yield smaller files
keyvalue_buffer_size = 2097152 ; value in bytes

[httpd]
port = 5984
bind_address = 127.0.0.1
authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
default_handler = {couch_httpd_db, handle_request}
secure_rewrites = true
vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
allow_jsonp = false
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
; For more socket options, consult Erlang's module 'inet' man page.
;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
log_max_chunk_size = 1000000
enable_cors = false

[ssl]
port = 6984
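; Serving HTTPS additionally requires a certificate and key; per the CouchDB
; 1.x documentation this is a minimal sketch (paths are placeholders), paired
; with an https daemon entry:
;cert_file = /etc/couchdb/cert/couchdb.pem
;key_file = /etc/couchdb/cert/privkey.pem
; ...and under [daemons]:
;httpsd = {couch_httpd, start_link, [https]}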

[log]
file = /var/log/couchdb/couch.log
level = info
include_sasl = true

[couch_httpd_auth]
authentication_db = _users
authentication_redirect = /_utils/session.html
require_valid_user = false
timeout = 600 ; number of seconds before automatic logout
auth_cache_size = 50 ; size is number of cache entries
allow_persistent_cookies = false ; set to true to allow persistent cookies
iterations = 10 ; iterations for password hashing
; comma-separated list of public fields, 404 if empty
; public_fields =
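; For example, to expose only selected fields of user documents (the field
; names here are illustrative):
;public_fields = first_name, last_name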

[cors]
credentials = false
; List of origins separated by a comma, * means accept all
; Origins must include the scheme: http://example.com
; You can't set origins = * and credentials = true at the same time.
;origins = *
; List of accepted headers separated by a comma
; headers =
; List of accepted methods
; methods =
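;
; For instance, a typical CORS setup might look like this (origins and header
; names are illustrative):
;origins = http://example.com, https://example.com
;headers = X-Couch-Id, X-Couch-Rev
;methods = GET, PUT, POST, HEAD, DELETE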


; Configuration for a vhost
;[cors:http://example.com]
; credentials = false
; List of origins separated by a comma
; Origins must include the scheme: http://example.com
; You can't set origins = * and credentials = true at the same time.
;origins =
; List of accepted headers separated by a comma
; headers =
; List of accepted methods
; methods =

[couch_httpd_oauth]
; If set to 'true', OAuth token and consumer secrets will be looked up
; in the authentication database (_users). These secrets are stored in
; a top level property named "oauth" in user documents. Example:
;     {
;         "_id": "org.couchdb.user:joe",
;         "type": "user",
;         "name": "joe",
;         "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
;         "salt": "4e170ffeb6f34daecfd814dfb4001a73",
;         "roles": ["foo", "bar"],
;         "oauth": {
;             "consumer_keys": {
;                 "consumerKey1": "key1Secret",
;                 "consumerKey2": "key2Secret"
;             },
;             "tokens": {
;                 "token1": "token1Secret",
;                 "token2": "token2Secret"
;             }
;         }
;     }
use_users_db = false

[query_servers]
javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js
coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js
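; Additional query servers can be registered the same way; for instance a
; Python view server (couchpy comes from the couchdb-python package; the
; path given here is an assumption):
;python = /usr/bin/couchpy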


; Setting reduce_limit to false disables the check on reduce output size.
; If you think you're hitting reduce_limit with a "good" reduce function,
; please let us know on the mailing list so we can fine-tune the heuristic.
[query_server_config]
reduce_limit = true
os_process_limit = 25
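; Settings like these can also be changed at runtime through the _config API
; (admin credentials may be required); a sketch using curl:
;   curl -X PUT http://127.0.0.1:5984/_config/query_server_config/reduce_limit \
;        -d '"false"'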

[daemons]
index_server={couch_index_server, start_link, []}
external_manager={couch_external_manager, start_link, []}
query_servers={couch_query_servers, start_link, []}
vhosts={couch_httpd_vhost, start_link, []}
httpd={couch_httpd, start_link, []}
stats_aggregator={couch_stats_aggregator, start, []}
stats_collector={couch_stats_collector, start, []}
uuids={couch_uuids, start, []}
auth_cache={couch_auth_cache, start_link, []}
replicator_manager={couch_replicator_manager, start_link, []}
os_daemons={couch_os_daemons, start_link, []}
compaction_daemon={couch_compaction_daemon, start_link, []}

[httpd_global_handlers]
/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/share/couchdb/www"}

_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"}
_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
_config = {couch_httpd_misc_handlers, handle_config_req}
_replicate = {couch_replicator_httpd, handle_req}
_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
_restart = {couch_httpd_misc_handlers, handle_restart_req}
_stats = {couch_httpd_stats_handlers, handle_stats_req}
_log = {couch_httpd_misc_handlers, handle_log_req}
_session = {couch_httpd_auth, handle_session_req}
_oauth = {couch_httpd_oauth, handle_oauth_req}
_db_updates = {couch_dbupdates_httpd, handle_req}
_plugins = {couch_plugins_httpd, handle_req}

[httpd_db_handlers]
_all_docs = {couch_mrview_http, handle_all_docs_req}
_changes = {couch_httpd_db, handle_changes_req}
_compact = {couch_httpd_db, handle_compact_req}
_design = {couch_httpd_db, handle_design_req}
_temp_view = {couch_mrview_http, handle_temp_view_req}
_view_cleanup = {couch_mrview_http, handle_cleanup_req}

; The external module takes an optional argument allowing you to narrow it to a
; single script. Otherwise the script name is inferred from the first path section
; after _external's own path.
; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
; _external = {couch_httpd_external, handle_external_req}

[httpd_design_handlers]
_compact = {couch_mrview_http, handle_compact_req}
_info = {couch_mrview_http, handle_info_req}
_list = {couch_mrview_show, handle_view_list_req}
_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
_show = {couch_mrview_show, handle_doc_show_req}
_update = {couch_mrview_show, handle_doc_update_req}
_view = {couch_mrview_http, handle_view_req}

; Enable external as an httpd handler, then link it with commands here.
; Note: this API is still under consideration.
; [external]
; mykey = /path/to/mycommand

; Here you can set up commands for CouchDB to manage
; while it is alive. It will attempt to restart each command
; if it exits.
; [os_daemons]
; some_daemon_name = /path/to/script -with args


[uuids]
; Known algorithms:
;   random - 128 bits of random awesome
;     All awesome, all the time.
;   sequential - monotonically increasing ids with random increments
;     First 26 hex characters are random. Last 6 increment in
;     random amounts until an overflow occurs. On overflow, the
;     random prefix is regenerated and the process starts over.
;   utc_random - Time since Jan 1, 1970 UTC with microseconds
;     First 14 characters are the time in hex. Last 18 are random.
;   utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
;     First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
algorithm = sequential
; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
utc_id_suffix =
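; For example, with algorithm = utc_id and utc_id_suffix = node01 (the suffix
; here is illustrative), generated ids look like <14 hex time chars>node01.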

[stats]
; rate is in milliseconds
rate = 1000
; sample intervals are in seconds
samples = [0, 60, 300, 900]

[attachments]
compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
compressible_types = text/*, application/javascript, application/json, application/xml

[replicator]
db = _replicator
; Maximum replication retry count can be a non-negative integer or "infinity".
max_replication_retry_count = 10
; More worker processes can give higher network throughput but can also
; imply more disk and network IO.
worker_processes = 4
; With lower batch sizes, checkpoints are done more frequently. Lower batch
; sizes also reduce the total amount of RAM used.
worker_batch_size = 500
; Maximum number of HTTP connections per replication.
http_connections = 20
; HTTP connection timeout per replication.
; Even for very fast/reliable networks it might need to be increased if a remote
; database is too busy.
connection_timeout = 30000
; If a request fails, the replicator will retry it up to N times.
retries_per_request = 10
; Some socket options that might boost performance in some scenarios:
;       {nodelay, boolean()}
;       {sndbuf, integer()}
;       {recbuf, integer()}
;       {priority, integer()}
; See the `inet` Erlang module's man page for the full list of options.
socket_options = [{keepalive, true}, {nodelay, false}]
; Path to a file containing the user's certificate.
;cert_file = /full/path/to/server_cert.pem
; Path to a file containing the user's private PEM-encoded key.
;key_file = /full/path/to/server_key.pem
; String containing the user's password. Only used if the private keyfile is password protected.
;password = somepassword
; Set to true to validate peer certificates.
verify_ssl_certificates = false
; File containing a list of peer trusted certificates (in the PEM format).
;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
; Maximum peer certificate depth (must be set even if certificate validation is off).
ssl_certificate_max_depth = 3
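;
; The settings above govern replications triggered by documents in the
; _replicator database; a minimal continuous replication document (the id,
; names and URLs are placeholders) looks like:
;     {
;         "_id": "my_rep",
;         "source": "http://user:pass@remote.example.com:5984/sourcedb",
;         "target": "targetdb",
;         "continuous": true
;     }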

[compaction_daemon]
; The delay, in seconds, between each check for which databases and view
; indexes need to be compacted.
check_interval = 300
; If a database or view index file is smaller than this value (in bytes),
; compaction will not happen. Very small files always have very high
; fragmentation, so they are not worth compacting.
min_file_size = 131072

[compactions]
; List of compaction rules for the compaction daemon.
; The daemon compacts databases and their respective view groups when all the
; condition parameters are satisfied. Configuration can be per database or
; global, and it has the following format:
;
; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
;
; Possible parameters:
;
; * db_fragmentation - If the ratio (as an integer percentage) of the amount
;                      of old data (and its supporting metadata) to the database
;                      file size is equal to or greater than this value, this
;                      database compaction condition is satisfied (a worked
;                      example follows the parameter list).
;                      This value is computed as:
;
;                           (file_size - data_size) / file_size * 100
;
;                      The data_size and file_size values can be obtained when
;                      querying a database's information URI (GET /dbname/).
;
; * view_fragmentation - If the ratio (as an integer percentage) of the amount
;                        of old data (and its supporting metadata) to the view
;                        index (view group) file size is equal to or greater than
;                        this value, then this view index compaction condition is
;                        satisfied. This value is computed as:
;
;                            (file_size - data_size) / file_size * 100
;
;                        The data_size and file_size values can be obtained when
;                        querying a view group's information URI
;                        (GET /dbname/_design/groupname/_info).
;
; * from _and_ to - The period during which compaction of a database (and its
;                   view groups) is allowed. The value for these parameters must
;                   obey the format:
;
;                   HH:MM - HH:MM  (HH in [0..23], MM in [0..59])
;
; * strict_window - If a compaction is still running after the end of the allowed
;                   period, it will be canceled if this parameter is set to 'true'.
;                   It defaults to 'false' and it's meaningful only if the *period*
;                   parameter is also specified.
;
; * parallel_view_compaction - If set to 'true', the database and its views are
;                              compacted in parallel. This is only useful on
;                              certain setups, like for example when the database
;                              and view index directories point to different
;                              disks. It defaults to 'false'.
;
; Before a compaction is triggered, an estimation of how much free disk space is
; needed is computed. This estimation corresponds to 2 times the data size of
; the database or view index. When there's not enough free disk space to compact
; a particular database or view index, a warning message is logged.
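;
; As a worked instance of the fragmentation formula above (numbers are
; illustrative): a database with file_size = 1000000 and data_size = 250000
; has (1000000 - 250000) / 1000000 * 100 = 75% fragmentation, which satisfies
; a db_fragmentation threshold of "70%".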
;
; Examples:
;
; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
;    The `foo` database is compacted if its fragmentation is 70% or more.
;    Any view index of this database is compacted only if its fragmentation
;    is 60% or more.
;
; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
;    Similar to the preceding example but a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM.
;
; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
;    Similar to the preceding example - a compaction (database or view index)
;    is only triggered if the current time is between midnight and 4 AM. If at
;    4 AM the database or one of its views is still compacting, the compaction
;    process will be canceled.
;
; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
;    Similar to the preceding example, but a database and its views can be
;    compacted in parallel.
;
;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}]