This file is indexed.

/usr/include/dar/archive.hpp is in libdar-dev 2.4.2-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

//*********************************************************************/
// dar - disk archive - a backup/restoration program
// Copyright (C) 2002-2052 Denis Corbin
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
//
// to contact the author : http://dar.linux.free.fr/email.html
/*********************************************************************/
// $Id: archive.hpp,v 1.69 2011/04/02 20:14:26 edrusb Rel $
//
/*********************************************************************/
//

    /// \file archive.hpp
    /// \brief the archive class is defined in this module
    /// \ingroup API


#ifndef ARCHIVE_HPP
#define ARCHIVE_HPP

#include "/usr/include/dar/libdar_my_config.h"

#include "/usr/include/dar/path.hpp"
#include "/usr/include/dar/scrambler.hpp"
#include "/usr/include/dar/statistics.hpp"
#include "/usr/include/dar/archive_options.hpp"
#include "/usr/include/dar/escape.hpp"
#include "/usr/include/dar/escape_catalogue.hpp"
#include "/usr/include/dar/pile.hpp"

namespace libdar
{

	/// the archive class realizes the most general operations on archives

	/// the operations correspond to the ones the final user expects; they
	/// are at the same abstraction level as the operations performed by the DAR
	/// command line tool.
	/// \ingroup API
    class archive
    {
    public:

	    /// this constructor opens an already existing archive (for reading) [this is the "read" constructor]

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] chem the path where to look for slices
	    /// \param[in] basename the slices basename of the archive to read
	    /// ("-" means standard input, and activates the output_pipe and input_pipe arguments)
	    /// \param[in] extension the slice extension (should always be "dar")
	    /// \param[in] options a set of options to use to read the archive
	archive(user_interaction & dialog,
		const path & chem,
		const std::string & basename,
		const std::string & extension,
		const archive_options_read & options);
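	    // Illustrative usage sketch for the "read" constructor (names and paths below are
	    // placeholders; `ui` is assumed to be a concrete user_interaction implementation and
	    // libdar is assumed to have been initialized beforehand, e.g. via get_version()):
	    //
	    //     libdar::archive_options_read opt;               // default read options
	    //     libdar::archive *arch = new libdar::archive(ui,
	    //                                                  libdar::path("/var/backups"), // where the slices are
	    //                                                  "monday_full",                // slice basename
	    //                                                  "dar",                        // slice extension
	    //                                                  opt);
	    //     // ... use the object (op_extract, op_listing, ...) then release it:
	    //     delete arch;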


	    /// this constructor creates an archive (full or differential) [this is the "create" constructor]

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] fs_root the filesystem to take as root for the backup
	    /// \param[in] sauv_path the path where to create slices
	    /// \param[in] filename base name of the slices. If "-" is given the archive will be produced on standard output
	    /// \param[in] extension slices extension ("dar")
	    /// \param[in] options optional parameters to use for the operation
 	    /// \param[out] progressive_report statistics about the operation, considering the treated files (NULL can be given if you don't want to use this feature)
	    /// \note the statistics fields used are:
	    /// - .treated: the total number of files seen
	    /// - .hard_link: the number of hard linked inodes
	    /// - .tooold: the number of files that changed while they were being saved and that could not be resaved (due to repeat limit or byte limit)
	    /// - .skipped: number of files not changed (differential backup)
	    /// - .errored: number of files concerned by filesystem error
	    /// - .ignored: number of files excluded by filters
	    /// - .deleted: number of files recorded as deleted
	    /// - .ea_treated: number of entries having some EA
	    /// - .byte_amount: number of bytes wasted due to the repeat-on-change feature
	    /// .
	archive(user_interaction & dialog,
		const path & fs_root,
		const path & sauv_path,
		const std::string & filename,
		const std::string & extension,
		const archive_options_create & options,
		statistics * progressive_report);
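	    // Illustrative usage sketch for a full backup with the "create" constructor
	    // (paths and basename are placeholders; `ui` is a user_interaction implementation):
	    //
	    //     libdar::archive_options_create opt;             // default creation options
	    //     libdar::statistics st;                          // optionally filled during the operation
	    //     libdar::archive *backup = new libdar::archive(ui,
	    //                                                    libdar::path("/home"),        // fs_root: what to save
	    //                                                    libdar::path("/var/backups"), // sauv_path: where to write slices
	    //                                                    "home_full",                  // slice basename
	    //                                                    "dar",                        // slice extension
	    //                                                    opt,
	    //                                                    &st);                         // or NULL to skip progressive reporting
	    //     delete backup;                                  // the backup is complete when the constructor returns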


	    /// this constructor isolates a catalogue of a given archive [this is the "isolate" constructor]

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] sauv_path the path where to create slices
	    /// \param[in] ref_arch the archive whose catalogue is to be isolated (the archive of reference)
	    /// \param[in] filename base name of the slices ("-" for standard output)
	    /// \param[in] extension slices extension ("dar")
	    /// \param[in] options optional parameters to use for the operation
	archive(user_interaction & dialog,
		const path &sauv_path,
		archive *ref_arch,
		const std::string & filename,
		const std::string & extension,
		const archive_options_isolate & options);
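	    // Illustrative usage sketch: isolating the catalogue of an archive previously opened
	    // with the "read" constructor (`arch`); names and paths are placeholders:
	    //
	    //     libdar::archive_options_isolate opt;
	    //     libdar::archive *cat_only = new libdar::archive(ui,
	    //                                                      libdar::path("/var/backups"),
	    //                                                      arch,             // archive whose catalogue is isolated
	    //                                                      "home_full_cat",  // basename of the isolated catalogue
	    //                                                      "dar",
	    //                                                      opt);
	    //     delete cat_only;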


	    /// this constructor builds an archive from two given archives [this is the "merge" constructor]

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] sauv_path the path where to create slices
	    /// \param[in] ref_arch1 the first mandatory input archive (the second one is optional and provided within the 'options' argument)
	    /// \param[in] filename base name of the slices. If "-" is given the archive will be produced on standard output
	    /// \param[in] extension slices extension ("dar")
	    /// \param[in] options optional parameters to be used for the operation
	    /// \param[out] progressive_report statistics about the operation, considering the treated files (NULL can be given if you don't want to use this feature)
	    /// \note the statistics fields used are:
	    /// - .treated: the total number of files seen
	    /// - .hard_link: the number of hard linked inodes
	    /// - .ignored: number of files excluded by filters
	    /// - .deleted: number of files recorded as deleted
	    /// - .ea_treated: number of entries with EA
	    /// .

	archive(user_interaction & dialog,
		const path & sauv_path,
		archive *ref_arch1,
		const std::string & filename,
		const std::string & extension,
		const archive_options_merge & options,
		statistics * progressive_report);
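	    // Illustrative usage sketch: merging/filtering an archive opened with the "read"
	    // constructor (`arch`) into a new one; the optional second archive of reference
	    // would be supplied through the options object, as noted above:
	    //
	    //     libdar::archive_options_merge opt;
	    //     libdar::archive *merged = new libdar::archive(ui,
	    //                                                    libdar::path("/var/backups"),
	    //                                                    arch,             // first archive of reference
	    //                                                    "merged_archive",
	    //                                                    "dar",
	    //                                                    opt,
	    //                                                    NULL);            // no progressive statistics
	    //     delete merged;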

	    /// copy constructor (not implemented, throws an exception if called explicitly or implicitly)

	    /// \note this lack of implementation is intentional; archive objects should rather be manipulated
	    /// using pointers, or passed as constant references (const &) in arguments or returned values.
	    /// Moreover, having two objects, one being a copy of the other, may lead to unexpected behaviors
	    /// while creating, isolating or merging archives.

	archive(const archive & ref) : stack(ref.stack) { throw Efeature(gettext("Archive copy constructor is not implemented")); };
	archive & operator = (const archive & ref) { throw Efeature(gettext("Archive assignment operator is not implemented")); };

	    /// the destructor
	~archive() { free(); };


	    /// extraction of data from an archive

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] fs_root the filesystem to take as root for the restoration
	    /// \param[in] options optional parameter to be used for the operation
	    /// \param[in,out] progressive_report points to an already existing statistics object that can be consulted at any time
	    /// during the call (see the returned value to know the useful fields and their meaning),
	    /// NULL can be given as argument if you only need the result at the end of the operation through the returned value of this call;
	    /// this should speed up the operation by a small amount.
	    /// \return the statistics about the operation, considering the treated files
	    /// \note the statistics fields used are:
	    /// - .treated: the total number of files restored
	    /// - .skipped: number of files not saved in the archive
	    /// - .tooold: number of files not restored due to overwriting policy decision
	    /// - .errored: number of files concerned by filesystem error
	    /// - .ignored: number of files excluded by filters
	    /// - .deleted: number of files deleted
	    /// - .hard_links: number of hard links restored
	    /// - .ea_treated: number of entries having some EA
	    /// .
	statistics op_extract(user_interaction & dialog,
			      const path &fs_root,
			      const archive_options_extract & options,
			      statistics *progressive_report);
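	    // Illustrative usage sketch: restoring the whole archive under a placeholder target
	    // directory, with `arch` an archive opened with the "read" constructor; the returned
	    // statistics object carries the counters described above (.treated, .errored, ...):
	    //
	    //     libdar::archive_options_extract opt;
	    //     libdar::statistics result = arch->op_extract(ui,
	    //                                                   libdar::path("/tmp/restore"), // fs_root for restoration
	    //                                                   opt,
	    //                                                   NULL);                        // no progressive report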

	    /// display a summary of the archive

	void summary(user_interaction & dialog);


	    /// listing of the archive contents

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] options list of optional parameters to use for the operation
	void op_listing(user_interaction & dialog,
			const archive_options_listing & options);

	    /// archive comparison with filesystem

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] fs_root the filesystem to take as root for the comparison
	    /// \param[in] options optional parameters to be used with the operation
	    /// \param[in,out] progressive_report points to an already existing statistics object that can be consulted at any time
	    /// during the call (see the returned value to know the useful fields and their meaning),
	    /// NULL can be given as argument if you only need the result at the end of the operation through the returned value of this call;
	    /// this should speed up the operation by a small amount.
	    /// \return the statistics about the operation, considering the treated files
	    /// \note the statistics fields used are:
	    /// - .treated: the total number of files seen
	    /// - .errored: number of files that do not match or could not be read
	    /// - .ignored: number of files excluded by filters
	    /// .
	statistics op_diff(user_interaction & dialog,
			   const path & fs_root,
			   const archive_options_diff & options,
			   statistics * progressive_report);
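	    // Illustrative usage sketch: comparing the archive contents against the live
	    // filesystem under a placeholder root, with `arch` opened for reading:
	    //
	    //     libdar::archive_options_diff opt;
	    //     libdar::statistics diff_result = arch->op_diff(ui, libdar::path("/home"), opt, NULL);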


	    /// test the archive integrity

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] options optional parameter to use for the operation
	    /// \param[in,out] progressive_report points to an already existing statistics object that can be consulted at any time
	    /// during the call (see the returned value to know the useful fields and their meaning),
	    /// NULL can be given as argument if you only need the result at the end of the operation through the returned value of this call;
	    /// this should speed up the operation by a small amount.
	    /// \note op_test will generate an error message if used on an archive
	    /// that has been created by the isolate or create constructor;
	    /// this is not only an implementation limitation but also a design choice:
	    /// testing a file archive using the C++ object that was used to create
	    /// the file is not a good idea. You need to first destroy this
	    /// C++ object and then create a new one with the read constructor;
	    /// only this way can you be sure your archive is properly tested.
	    /// \return the statistics about the operation, considering the treated files
	    /// \note the statistics fields used are:
	    /// - .treated: the total number of files seen
	    /// - .skipped: number of files older than the one on the filesystem
	    /// - .errored: number of files with errors
	    /// .
	statistics op_test(user_interaction & dialog,
			   const archive_options_test & options,
			   statistics * progressive_report);
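	    // Illustrative usage sketch: testing an archive that was opened with the "read"
	    // constructor (as required by the note above); `arch` and `ui` are placeholders:
	    //
	    //     libdar::archive_options_test opt;
	    //     libdar::statistics test_result = arch->op_test(ui, opt, NULL);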


	    /// getting information about a given directory

	    /// \param[in,out] dialog for user interaction
	    /// \param[in] dir relative path of the directory to get information about
	    /// \return true if some children have been found and thus if
	    /// the dialog.listing() method has been called at least once.
	    /// \note the get_children_of() call uses the listing() method
	    /// to send back data to the user. If it is not redefined in the
	    /// dialog object nothing will get sent back to the user
	bool get_children_of(user_interaction & dialog,
			     const std::string & dir);
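	    // Illustrative usage sketch: browsing one directory of the archive; as noted above,
	    // the data is delivered through the dialog's listing() method, so the `ui` object
	    // must redefine it for anything to be reported back (the path below is a placeholder):
	    //
	    //     bool has_children = arch->get_children_of(ui, "etc/apache2");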

	    /// retrieving statistics about archive contents
	const entree_stats get_stats() const { if(cat == NULL) throw SRC_BUG; return cat->get_stats(); };

	    /// gives access to internal catalogue (not to be used from the API)

	    /// \return the catalogue reference contained in this archive
	    /// \note this method is not to be used directly from external applications; it is
	    /// not part of the API but must remain a public method to be usable by the database class
	    /// \note this method is not usable (throws an exception) if the archive has been
	    /// opened in sequential read mode and the catalogue has not yet been read; use the
	    /// same method but with a user_interaction argument instead in that situation
	const catalogue & get_catalogue() const;

	    /// gives access to internal catalogue (not to be used from the API) even in sequential read mode
	const catalogue & get_catalogue(user_interaction & dialog) const;

	    /// closes all file descriptors and associated data, keeping only the catalogue

	    /// \note once this method has been called, the archive object can only be used
	    /// as reference for a differential archive.
	    /// \note this method is not usable (throws an exception) if the archive has been
	    /// opened in sequential read mode and the catalogue has not yet been read; use the
	    /// same method but with a user_interaction argument instead in that situation
	void drop_all_filedescriptors();

	    /// closes all file descriptors and associated data even when in sequential read mode

	void drop_all_filedescriptors(user_interaction & dialog);

    private:
	enum operation { oper_create, oper_isolate, oper_merge };

	pile stack;
	header_version ver;
	catalogue *cat;
	infinint local_cat_size;
	path *local_path;
	bool exploitable; //< is false if only the catalogue is available (for reference backup or isolation).
	bool lax_read_mode; //< whether the archive has been opened in lax mode (unused for creation/merging/isolation)
	bool sequential_read; //< whether the archive is read in sequential mode

	void free();
	catalogue & get_cat() { if(cat == NULL) throw SRC_BUG; else return *cat; };
	const header_version & get_header() const { return ver; };
	const path & get_path() { if(local_path == NULL) throw SRC_BUG; else return *local_path; };

	bool get_sar_param(infinint & sub_file_size, infinint & first_file_size, infinint & last_file_size,
			   infinint & total_file_number);
	infinint get_level2_size();
	infinint get_cat_size() const { return local_cat_size; };

	statistics op_create_in(user_interaction & dialog,
				operation op,
				const path & fs_root,
				const path & sauv_path,
				archive *ref_arch,
				const mask & selection,
				const mask & subtree,
				const std::string & filename,
				const std::string & extension,
				bool allow_over,
				bool warn_over,
				bool info_details,
				const infinint & pause,
				bool empty_dir,
				compression algo,
				U_I compression_level,
				const infinint & file_size,
				const infinint & first_file_size,
				const mask & ea_mask,
				const std::string & execute,
				crypto_algo crypto,
				const secu_string & pass,
				U_32 crypto_size,
				const mask & compr_mask,
				const infinint & min_compr_size,
				bool nodump,
				const infinint & hourshift,
				bool empty,
				bool alter_atime,
				bool furtive_read_mode,
				bool same_fs,
				inode::comparison_fields what_to_check,
				bool snapshot,
				bool cache_directory_tagging,
				bool display_skipped,
				const infinint & fixed_date,
				const std::string & slice_permission,
				const std::string & slice_user_ownership,
				const std::string & slice_group_ownership,
				const infinint & repeat_count,
				const infinint & repeat_byte,
				bool add_marks_for_sequential_reading,
				bool security_check,
				const infinint & sparse_file_min_size,
				const std::string & user_comment,
				hash_algo hash,
				const infinint & slice_min_digits,
				const std::string & backup_hook_file_execute,
				const mask & backup_hook_file_mask,
				bool ignore_unknown,
				statistics * progressive_report);

	void op_create_in_sub(user_interaction & dialog,        //< interaction with user
			      operation op,                     //< the filter operation to bind to
			      const path & fs_root,             //< root of the filesystem to act on
			      const path & sauv_path_t,         //< where to create the archive
			      catalogue  * ref_arch1,           //< catalogue of the archive of reference (a catalogue must be provided in any case, an empty one will do when there is no reference)
			      catalogue  * ref_arch2,           //< secondary catalogue used for merging, can be NULL if not used
			      const path * ref_path,            //< path of the archive of reference (NULL if there is no archive of reference used, thus ref_arch1 (previous arg) is probably an empty catalogue)
			      const mask & selection,           //< filter on filenames
			      const mask & subtree,             //< filter on directory tree and filenames
			      const std::string & filename,     //< basename of the archive to create
			      const std::string & extension,    //< extension of the archives
			      bool allow_over,                  //< whether to allow overwriting (of slices)
			      const crit_action & overwrite,    //< whether and how to allow overwriting (for files inside the archive)
			      bool warn_over,                   //< whether to warn before overwriting
			      bool info_details,                //< whether to display detailed information
			      const infinint & pause,           //< whether to pause between slices
			      bool empty_dir,                   //< whether to store excluded dir as empty directories
			      compression algo,                 //< compression algorithm
			      U_I compression_level,            //< compression level (range 1 to 9)
			      const infinint & file_size,       //< slice size
			      const infinint & first_file_size, //< first slice size
			      const mask & ea_mask,             //< Extended Attribute to consider
			      const std::string & execute,      //< Command line to execute between slices
			      crypto_algo crypto,               //< crypt algorithm
			      const secu_string & pass,         //< password ("" to request the password interactively)
			      U_32 crypto_size,                 //< size of crypto blocks
			      const mask & compr_mask,          //< files to compress
			      const infinint & min_compr_size,  //< file size under which to not compress files
			      bool nodump,                      //< whether to consider the "nodump" filesystem flag
			      const infinint & hourshift,       //< hourshift (see man page -H option)
			      bool empty,                       //< whether to make a "dry-run" execution
			      bool alter_atime,                 //< whether to alter the atime date (as opposed to ctime) when reading files
			      bool furtive_read_mode,           //< whether to alter neither atime nor ctime (if true alter_atime is ignored)
			      bool same_fs,                     //< confine the file consideration to a single filesystem
			      inode::comparison_fields what_to_check,  //< fields to consider when comparing inodes (see inode::comparison_fields enumeration)
			      bool snapshot,                    //< make as if all files had not changed
			      bool cache_directory_tagging,     //< avoid saving directories that follow the cache directory tagging
			      bool display_skipped,             //< display skipped files for the operation
			      bool keep_compressed,             //< keep file compressed when merging
			      const infinint & fixed_date,      //< whether to ignore any archive of reference and only save files whose modification date is more recent than the given "fixed_date" date
			      const std::string & slice_permission,      //< permissions of slices that will be created
			      const std::string & slice_user_ownership,  //< user ownership of slices that will be created
			      const std::string & slice_group_ownership, //< group ownership of slices that will be created
			      const infinint & repeat_count,             //< max number of retries to save a file that changed while it was being read for backup
			      const infinint & repeat_byte,              //< max amount of wasted data used to save a file that changed while it was being read for backup
			      bool decremental,                          //< in the merging context only, whether to build a decremental backup from the two archives of reference
			      bool add_marks_for_sequential_reading,     //< whether to add marks for sequential reading
			      bool security_check,                       //< whether to check for ctime change with no reason (rootkit ?)
			      const infinint & sparse_file_min_size,     //< the size above which to look for holes in sparse files (0 for no detection)
			      const std::string & user_comment,          //< user comment to put in the archive
			      hash_algo hash,                            //< whether to produce hash file, and which algo to use
			      const infinint & slice_min_digits,         //< minimum digit for slice number
			      const std::string & backup_hook_file_execute, //< command to execute before and after backing up the selected files
			      const mask & backup_hook_file_mask,         //< files elected to have a command executed before and after their backup
			      bool ignore_unknown,                        //< whether to warn when an unknown inode type is met
			      statistics * st_ptr);             //< statistics must not be NULL !

	void disable_natural_destruction();
	void enable_natural_destruction();
	const label & get_layer1_data_name() const;
	const label & get_catalogue_data_name() const;
	bool only_contains_an_isolated_catalogue() const; //< true if the current archive only contains an isolated catalogue
	void check_against_isolation(user_interaction & dialog, bool lax) const; //< throw Erange exception if the archive only contains an isolated catalogue
	void check_header_version() const;
    };

} // end of namespace

#endif