os.chdir(source_path)
+
+ # for each file to include in the backup:
for path in self._recursive_walk_dir('.'):
+ # calculate stat dict for current file
+ stat = self._stat_dict(path) # TODO: reduce paths length using previous dir entries
+ stat['volume'] = self.vol_no
+
+ # backup file
tarobj.add(path)
- # TODO: reduce paths length using previous dir entries
- stat = self._stat_dict(path)
- stat['volume'] = self.vol_no
+ # retrieve file offset
stat['offset'] = tarobj.get_last_member_offset()
+ # store the stat dict in the index
s = json.dumps(stat) + '\n'
crc = binascii.crc32(s, crc) & 0xffffffff
index_fd.write(s)
# set up the volume that needs to be read
if curr_vol_no != vol_no:
+ curr_vol_no = vol_no
vol_name = self.volume_name_func(backup_path, True, vol_no)
vol_path = os.path.join(backup_path, vol_name)
if vol_fd:
tarobj = None
# seek tarfile if needed
- offset = j.get('volume', -1)
+ offset = j.get('offset', -1)
if vol_fd.tell() != offset:
vol_fd.seek(offset)
member = tarobj.next()
tarobj.extract(member)
+ os.chdir(cwd)
if tarobj:
tarobj.close()
but when there's encryption or concat compression going on it's more
complicated than that.
"""
- if isinstance(self.fileobj, _Stream):
+ if self.concat_compression:
return self.fileobj.last_block_offset
else:
- return self.fileobj.tell()
-
+ return self.last_block_offset
def getnames(self):
"""Return the members of the archive as a list of their names. It has
def test_restore_from_index(self):
'''
- Restores a full backup from using an index file.
+ Restores a full backup using an index file.
'''
# this test only works for uncompressed or concat compressed modes
if self.MODE.startswith(':') or self.MODE.startswith('|'):
# create first backup
deltatar.create_full_backup(
source_path="source_dir",
- backup_path="backup_dir",
- max_volume_size=1)
+ backup_path="backup_dir")
shutil.rmtree("source_dir")