#!/usr/bin/env python

"""
Simple program for filtering git repositories, similar to git filter-branch,
BFG repo cleaner, and others.  The basic idea is that it works by running
  git fast-export <options> | filter | git fast-import <options>
where this program not only launches the whole pipeline but also serves as
the 'filter' in the middle.  It does a few additional things on top as well
in order to make it into a well-rounded filtering tool.
"""

from __future__ import print_function

import argparse
import collections
import fnmatch
import os
import re
import StringIO
import subprocess
import sys
import time
import textwrap

from datetime import tzinfo, timedelta, datetime

__all__ = ["Blob", "Reset", "FileChanges", "Commit", "Tag", "Progress",
           "Checkpoint", "FastExportFilter", "ProgressWriter",
           "string_to_date", "date_to_string",
           "record_id_rename", "GitUtils", "FilteringOptions", "RepoFilter"]

def _timedelta_to_seconds(delta):
  """
  Converts timedelta to seconds
  """
  offset = delta.days*86400 + delta.seconds + (delta.microseconds+0.0)/1000000
  return round(offset)

class FixedTimeZone(tzinfo):
  """
  Fixed offset in minutes east from UTC.
  """

  tz_re = re.compile(r'^([-+]?)(\d\d)(\d\d)$')

  def __init__(self, offset_string):
    tzinfo.__init__(self)
    sign, hh, mm = FixedTimeZone.tz_re.match(offset_string).groups()
    factor = -1 if (sign and sign == '-') else 1
    self._offset = timedelta(minutes = factor*(60*int(hh) + int(mm)))
    self._offset_string = offset_string

  def utcoffset(self, dt):
    return self._offset

  def tzname(self, dt):
    return self._offset_string

  def dst(self, dt):
    return timedelta(0)

def string_to_date(datestring):
  (unix_timestamp, tz_offset) = datestring.split()
  return datetime.fromtimestamp(int(unix_timestamp),
                                FixedTimeZone(tz_offset))

def date_to_string(dateobj):
  epoch = datetime.fromtimestamp(0, dateobj.tzinfo)
  return('{} {}'.format(int(_timedelta_to_seconds(dateobj - epoch)),
                        dateobj.tzinfo.tzname(0)))
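
# Illustrative round trip; '1234567890 +0500' is a made-up instance of the
# "<unix-timestamp> <tz-offset>" format fast-export emits:
#   when = string_to_date('1234567890 +0500')
#   date_to_string(when)   # -> '1234567890 +0500'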

class PathQuoting:
  _unescape = {'a': '\a',
               'b': '\b',
               'f': '\f',
               'n': '\n',
               'r': '\r',
               't': '\t',
               'v': '\v',
               '"': '"',
               '\\':'\\'}
  _unescape_re = re.compile(r'\\([a-z"\\]|[0-9]{3})')
  _escape = [chr(x) for x in xrange(127)]+['\\'+oct(x)[1:] for x in xrange(127,256)]
  _reverse = dict(map(reversed, _unescape.items()))
  for x in _reverse:
    _escape[ord(x)] = '\\'+_reverse[x]
  _special_chars = [len(x) > 1 for x in _escape]

  @staticmethod
  def unescape_sequence(orig):
    seq = orig.group(1)
    return PathQuoting._unescape[seq] if len(seq) == 1 else chr(int(seq, 8))

  @staticmethod
  def dequote(quoted_string):
    if quoted_string.startswith('"'):
      assert quoted_string.endswith('"')
      return PathQuoting._unescape_re.sub(PathQuoting.unescape_sequence,
                                          quoted_string[1:-1])
    return quoted_string

  @staticmethod
  def enquote(unquoted_string):
    # Option 1: Quoting when fast-export would:
    #   pqsc = PathQuoting._special_chars
    #   if any(pqsc[ord(x)] for x in set(unquoted_string)):
    # Option 2, perf hack: do minimal amount of quoting required by fast-import
    if unquoted_string.startswith('"') or '\n' in unquoted_string:
      pqe = PathQuoting._escape
      return '"' + ''.join(pqe[ord(x)] for x in unquoted_string) + '"'
    return unquoted_string
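
# Sketch of the quoting round trip (illustrative path containing a newline):
#   PathQuoting.enquote('hello\nworld')    # -> '"hello\\nworld"'
#   PathQuoting.dequote('"hello\\nworld"') # -> 'hello\nworld'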

class AncestryGraph(object):
  """
  A class that maintains a directed acyclic graph of commits for the
  purpose of determining if one commit is an ancestor of another.
  """

  def __init__(self):
    self.cur_value = 0

    # A mapping from the external identifiers given to us to the simple
    # integers we use in self.graph
    self.value = {}

    # A tuple of (depth, list-of-ancestors).  Values and keys in this graph
    # are all integers from the self.value dict.  The depth of a commit is
    # one more than the max depth of any of its ancestors.
    self.graph = {}

  def add_commit_and_parents(self, commit, parents):
    """
    Record in graph that commit has the given parents.  parents _MUST_ have
    been first recorded.  commit _MUST_ not have been recorded yet.
    """
    assert all(p in self.value for p in parents)
    assert commit not in self.value

    # Get values for commit and parents
    self.cur_value += 1
    self.value[commit] = self.cur_value
    graph_parents = [self.value[x] for x in parents]

    # Determine depth for commit, then insert the info into the graph
    depth = 1
    if parents:
      depth += max(self.graph[p][0] for p in graph_parents)
    self.graph[self.cur_value] = (depth, graph_parents)

  def is_ancestor(self, possible_ancestor, check):
    """
    Return whether possible_ancestor is an ancestor of check
    """
    a, b = self.value[possible_ancestor], self.value[check]
    a_depth = self.graph[a][0]
    ancestors = [b]
    visited = set()
    while ancestors:
      ancestor = ancestors.pop()
      if ancestor in visited:
        continue
      visited.add(ancestor)
      depth, more_ancestors = self.graph[ancestor]
      if ancestor == a:
        return True
      elif depth <= a_depth:
        continue
      ancestors.extend(more_ancestors)
    return False
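
# Usage sketch; the commit identifiers here are arbitrary placeholders (any
# hashable value works, the filter itself uses integer marks):
#   graph = AncestryGraph()
#   graph.add_commit_and_parents('A', [])
#   graph.add_commit_and_parents('B', ['A'])
#   graph.add_commit_and_parents('C', ['B'])
#   graph.is_ancestor('A', 'C')   # -> True
#   graph.is_ancestor('C', 'A')   # -> False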

class MailmapInfo(object):
  def __init__(self, filename):
    self.changes = {}
    self._parse_file(filename)

  def _parse_file(self, filename):
    name_and_email_re = re.compile(r'(.*?)\s*<([^>]+)>\s*')
    comment_re = re.compile(r'\s*#.*')
    if not os.access(filename, os.R_OK):
      raise SystemExit("Cannot read {}".format(filename))
    with open(filename) as f:
      count = 0
      for line in f:
        count += 1
        err = "Unparseable mailmap file: line #{} is bad: {}".format(count, line)
        # Remove comments
        line = comment_re.sub('', line)
        # Remove leading and trailing whitespace
        line = line.strip()
        if not line:
          continue

        m = name_and_email_re.match(line)
        if not m:
          raise SystemExit(err)
        proper_name, proper_email = m.groups()
        if len(line) == m.end():
          self.changes[(None, proper_email)] = (proper_name, proper_email)
          continue
        rest = line[m.end():]
        m = name_and_email_re.match(rest)
        if m:
          commit_name, commit_email = m.groups()
          if len(rest) != m.end():
            raise SystemExit(err)
        else:
          commit_name, commit_email = rest, None
        self.changes[(commit_name, commit_email)] = (proper_name, proper_email)

  def translate(self, name, email):
    ''' Given a name and email, return the expected new name and email from the
        mailmap if there is a translation rule for it, otherwise just return
        the given name and email.'''
    for old, new in self.changes.iteritems():
      old_name, old_email = old
      new_name, new_email = new
      if (email == old_email or not old_email) and (
          name == old_name or not old_name):
        return (new_name or name, new_email or email)
    return (name, email)
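
# Translation sketch, assuming a mailmap file containing the line
#   Jane Doe <jane@example.com> <jdoe@old.example.com>
# (names and addresses here are made up for illustration):
#   mailmap = MailmapInfo('.mailmap')
#   mailmap.translate('J. Doe', 'jdoe@old.example.com')
#   # -> ('Jane Doe', 'jane@example.com')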

class ProgressWriter(object):
  def __init__(self):
    self._last_progress_update = time.time()
    self._last_message = None

  def show(self, msg):
    self._last_message = msg
    now = time.time()
    if now - self._last_progress_update > .1:
      self._last_progress_update = now
      sys.stdout.write("\r{}".format(msg))
      sys.stdout.flush()

  def finish(self):
    self._last_progress_update = 0
    if self._last_message:
      self.show(self._last_message)
    sys.stdout.write("\n")
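
# Usage sketch: show() throttles terminal updates to at most one every 0.1
# seconds; finish() forces the final message out (n is whatever loop bound
# the caller has):
#   pw = ProgressWriter()
#   for i in xrange(n):
#     pw.show("Parsed {} commits".format(i))
#   pw.finish()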

class _IDs(object):
  """
  A class that maintains the 'name domain' of all the 'marks' (short int
  id for a blob/commit git object). The reason this mechanism is necessary
  is because the text of fast-export may refer to an object using a different
  mark than the mark that was assigned to that object using IDS.new(). This
  class allows you to translate the fast-export marks (old) to the marks
  assigned from IDS.new() (new).

  Note that there are two reasons why the marks may differ: (1) The
  user manually creates Blob or Commit objects (for insertion into the
  stream) (2) We're reading the data from two different repositories
  and trying to combine the data (git fast-export will number ids from
  1...n, and having two 1's, two 2's, two 3's, causes issues).
  """

  def __init__(self):
    """
    Init
    """
    # The id for the next created blob/commit object
    self._next_id = 1

    # A map of old-ids to new-ids (1:1 map)
    self._translation = {}

    # A map of new-ids to every old-id that points to the new-id (1:N map)
    self._reverse_translation = {}

  def new(self):
    """
    Should be called whenever a new blob or commit object is created. The
    returned value should be used as the id/mark for that object.
    """
    rv = self._next_id
    self._next_id += 1
    return rv

  def record_rename(self, old_id, new_id, handle_transitivity = False):
    """
    Record that old_id is being renamed to new_id.
    """
    if old_id != new_id:
      # old_id -> new_id
      self._translation[old_id] = new_id

      # Transitivity will be needed if new commits are being inserted mid-way
      # through a branch.
      if handle_transitivity:
        # Anything that points to old_id should point to new_id
        if old_id in self._reverse_translation:
          for id_ in self._reverse_translation[old_id]:
            self._translation[id_] = new_id

      # Record that new_id is pointed to by old_id
      if new_id not in self._reverse_translation:
        self._reverse_translation[new_id] = []
      self._reverse_translation[new_id].append(old_id)

  def translate(self, old_id):
    """
    If old_id has been mapped to an alternate id, return the alternate id.
    """
    if old_id in self._translation:
      return self._translation[old_id]
    else:
      return old_id

  def __str__(self):
    """
    Convert IDs to string; used for debugging
    """
    rv = "Current count: %d\nTranslation:\n" % self._next_id
    for k in sorted(self._translation):
      rv += "  %d -> %d\n" % (k, self._translation[k])

    rv += "Reverse translation:\n"
    for k in sorted(self._reverse_translation):
      rv += "  " + str(k) + " -> " + str(self._reverse_translation[k]) + "\n"

    return rv
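
# Mark-translation sketch (values illustrative; the module itself works
# through a shared _IDS instance rather than a fresh one).  If fast-export
# calls some blob :7 but we already handed out mark 1 for it, the old mark
# is recorded as renamed:
#   ids = _IDs()
#   fresh = ids.new()            # -> 1
#   ids.record_rename(7, fresh)  # fast-export's :7 is our :1
#   ids.translate(7)             # -> 1
#   ids.translate(3)             # -> 3 (no rename recorded)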

class _GitElement(object):
  """
  The base class for all git elements that we create.
  """

  def __init__(self):
    # A string that describes what type of Git element this is
    self.type = None

    # A flag telling us if this Git element has been dumped
    # (i.e. printed) or skipped.  Typically elements that have been
    # dumped or skipped will not be dumped again.
    self.dumped = 0

  def dump(self, file_):
    """
    This version should never be called. Derived classes need to
    override! We should note that subclasses should implement this
    method such that the output would match the format produced by
    fast-export.
    """
    raise SystemExit("Unimplemented function: %s.dump()" % type(self).__name__)

  def __str__(self):
    """
    Convert GitElement to string; used for debugging
    """
    old_dumped = self.dumped
    writeme = StringIO.StringIO()
    self.dump(writeme)
    output_lines = writeme.getvalue().splitlines()
    writeme.close()
    self.dumped = old_dumped
    return "{}:\n  {}".format(type(self).__name__, "\n  ".join(output_lines))

  def skip(self, new_id=None):
    """
    Ensures this element will not be written to output
    """
    self.dumped = 2

class _GitElementWithId(_GitElement):
  """
  The base class for Git elements that have IDs (commits and blobs)
  """

  def __init__(self):
    _GitElement.__init__(self)

    # The mark (short, portable id) for this element
    self.id = _IDS.new()

    # The previous mark for this element
    self.old_id = None

  def skip(self, new_id=None):
    """
    This element will no longer be automatically written to output. When a
    commit gets skipped, its ID will need to be translated to that of its
    parent.
    """
    self.dumped = 2

    _IDS.record_rename(self.old_id or self.id, new_id)

class Blob(_GitElementWithId):
  """
  This class defines our representation of git blob elements (i.e. our
  way of representing file contents).
  """

  def __init__(self, data, original_id = None):
    _GitElementWithId.__init__(self)

    # Denote that this is a blob
    self.type = 'blob'

    # Record original id
    self.original_id = original_id

    # Stores the blob's data
    self.data = data

  def dump(self, file_):
    """
    Write this blob element to a file.
    """
    self.dumped = 1

    file_.write('blob\n')
    file_.write('mark :%d\n' % self.id)
    file_.write('data %d\n%s' % (len(self.data), self.data))
    file_.write('\n')
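
  # The resulting stream chunk is fast-import's blob command; e.g. a
  # Blob('hello\n') that was assigned mark 1 dumps as (illustrative):
  #   blob
  #   mark :1
  #   data 6
  #   hello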

class Reset(_GitElement):
  """
  This class defines our representation of git reset elements.  A reset
  event is the creation (or recreation) of a named branch, optionally
  starting from a specific revision.
  """

  def __init__(self, ref, from_ref = None):
    _GitElement.__init__(self)

    # Denote that this is a reset
    self.type = 'reset'

    # The name of the branch being (re)created
    self.ref = ref

    # Some reference to the branch/commit we are resetting from
    self.from_ref = from_ref

  def dump(self, file_):
    """
    Write this reset element to a file
    """
    self.dumped = 1

    file_.write('reset %s\n' % self.ref)
    if self.from_ref:
      file_.write('from :%d\n' % self.from_ref)
    file_.write('\n')

class FileChanges(_GitElement):
  """
  This class defines our representation of file change elements. File change
  elements are components within a Commit element.
  """

  def __init__(self, type_, filename, id_ = None, mode = None):
    _GitElement.__init__(self)

    # Denote the type of file-change (M for modify, D for delete, etc)
    self.type = type_

    # Record the name of the file being changed
    self.filename = filename

    # Record the mode (mode describes type of file entry (non-executable,
    # executable, or symlink)).
    self.mode = None

    # blob_id is the id (mark) of the affected blob
    self.blob_id = None

    # For 'M' file changes (modify), expect to have id and mode
    if type_ == 'M':
      if mode is None:
        raise SystemExit("file mode and idnum needed for %s" % filename)
      self.mode = mode
      self.blob_id = id_

    # For 'R' file changes (rename), expect to have newname as third arg
    elif type_ == 'R':
      if id_ is None:
        raise SystemExit("new name needed for rename of %s" % filename)
      self.filename = (self.filename, id_)

  def dump(self, file_):
    """
    Write this file-change element to a file
    """
    skipped_blob = (self.type == 'M' and self.blob_id is None)
    if skipped_blob: return
    self.dumped = 1

    quoted_filename = PathQuoting.enquote(self.filename)
    if self.type == 'M' and isinstance(self.blob_id, int):
      file_.write('M %s :%d %s\n' % (self.mode, self.blob_id, quoted_filename))
    elif self.type == 'M':
      file_.write('M %s %s %s\n' % (self.mode, self.blob_id, quoted_filename))
    elif self.type == 'D':
      file_.write('D %s\n' % quoted_filename)
    else:
      raise SystemExit("Unhandled filechange type: %s" % self.type)

class Commit(_GitElementWithId):
  """
  This class defines our representation of commit elements. Commit elements
  contain all the information associated with a commit.
  """

  def __init__(self, branch,
               author_name, author_email, author_date,
               committer_name, committer_email, committer_date,
               message,
               file_changes,
               from_commit = None,
               merge_commits = [],
               original_id = None,
               **kwargs):
    _GitElementWithId.__init__(self)

    # Denote that this is a commit element
    self.type = 'commit'

    # Record the affected branch
    self.branch = branch

    # Record original id
    self.original_id = original_id

    # Record author's name
    self.author_name = author_name

    # Record author's email
    self.author_email = author_email

    # Record date of authoring
    self.author_date = author_date

    # Record committer's name
    self.committer_name = committer_name

    # Record committer's email
    self.committer_email = committer_email

    # Record date the commit was made
    self.committer_date = committer_date

    # Record commit message
    self.message = message

    # List of file-changes associated with this commit. Note that file-changes
    # are also represented as git elements
    self.file_changes = file_changes

    # Record the commit to initialize this branch from. This revision will be
    # the first parent of the new commit
    self.from_commit = from_commit

    # Record additional parent commits
    self.merge_commits = merge_commits

    # Member below is necessary for workaround fast-import's/fast-export's
    # weird handling of merges.
    self.stream_number = 0
    if "stream_number" in kwargs:
      self.stream_number = kwargs["stream_number"]

  def dump(self, file_):
    """
    Write this commit element to a file.
    """
    self.dumped = 1

    # Workaround fast-import/fast-export weird handling of merges
    if self.stream_number != _CURRENT_STREAM_NUMBER:
      _EXTRA_CHANGES[self.id] = [[change for change in self.file_changes]]

    merge_extra_changes = []
    for parent in self.merge_commits:
      if parent in _EXTRA_CHANGES:
        merge_extra_changes += _EXTRA_CHANGES[parent]

    for additional_changes in merge_extra_changes:
      self.file_changes += additional_changes

    if self.stream_number == _CURRENT_STREAM_NUMBER:
      parent_extra_changes = []
      if self.from_commit and self.from_commit in _EXTRA_CHANGES:
        parent_extra_changes = _EXTRA_CHANGES[self.from_commit]
      parent_extra_changes += merge_extra_changes
      _EXTRA_CHANGES[self.id] = parent_extra_changes
    # End workaround

    file_.write(('commit {}\n'
                 'mark :{}\n'
                 'author {} <{}> {}\n'
                 'committer {} <{}> {}\n'
                 'data {}\n{}{}'
                 ).format(
                   self.branch, self.id,
                   self.author_name, self.author_email, self.author_date,
                   self.committer_name, self.committer_email, self.committer_date,
                   len(self.message), self.message,
                   '' if self.message.endswith('\n') else '\n')
                )
    if self.from_commit:
      mark = ':' if isinstance(self.from_commit, int) else ''
      file_.write('from {}{}\n'.format(mark, self.from_commit))
    for ref in self.merge_commits:
      mark = ':' if isinstance(ref, int) else ''
      file_.write('merge {}{}\n'.format(mark, ref))
    for change in self.file_changes:
      change.dump(file_)
    file_.write('\n')
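
  # Shape of the emitted commit command (all values illustrative; the
  # message here is 'Add hello to README\n', 20 bytes):
  #   commit refs/heads/master
  #   mark :4
  #   author Jane Doe <jane@example.com> 1234567890 +0500
  #   committer Jane Doe <jane@example.com> 1234567890 +0500
  #   data 20
  #   Add hello to README
  #   from :3
  #   M 100644 :2 README.md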

  def get_parents(self):
    """
    Return all parent commits
    """
    my_parents = []
    if self.from_commit:
      my_parents.append(self.from_commit)
    my_parents += self.merge_commits
    return my_parents

  def first_parent(self):
    """
    Return first parent commit
    """
    my_parents = self.get_parents()
    if my_parents:
      return my_parents[0]
    return None

  def skip(self, new_id=None):
    _SKIPPED_COMMITS.add(self.old_id or self.id)
    _GitElementWithId.skip(self, new_id)

class Tag(_GitElement):
  """
  This class defines our representation of annotated tag elements.
  """

  def __init__(self, ref, from_ref,
               tagger_name, tagger_email, tagger_date, tag_msg,
               original_id = None):
    _GitElement.__init__(self)

    # Denote that this is a tag element
    self.type = 'tag'

    # Store the name of the tag
    self.ref = ref

    # Store the entity being tagged (this should be a commit)
    self.from_ref = from_ref

    # Record original id
    self.original_id = original_id

    # Store the name of the tagger
    self.tagger_name = tagger_name

    # Store the email of the tagger
    self.tagger_email = tagger_email

    # Store the date
    self.tagger_date = tagger_date

    # Store the tag message
    self.tag_message = tag_msg

  def dump(self, file_):
    """
    Write this tag element to a file
    """

    self.dumped = 1

    file_.write('tag %s\n' % self.ref)
    mark = ':' if isinstance(self.from_ref, int) else ''
    file_.write('from {}{}\n'.format(mark, self.from_ref))
    if self.tagger_name:
      file_.write('tagger %s <%s> ' % (self.tagger_name, self.tagger_email))
      file_.write(self.tagger_date)
      file_.write('\n')
    file_.write('data %d\n%s' % (len(self.tag_message), self.tag_message))
    file_.write('\n')

class Progress(_GitElement):
  """
  This class defines our representation of progress elements. The progress
  element only contains a progress message, which is printed by fast-import
  when it processes the progress output.
  """

  def __init__(self, message):
    _GitElement.__init__(self)

    # Denote that this is a progress element
    self.type = 'progress'

    # Store the progress message
    self.message = message

  def dump(self, file_):
    """
    Write this progress element to a file
    """
    self.dumped = 1

    file_.write('progress %s\n' % self.message)
    #file_.write('\n')

class Checkpoint(_GitElement):
  """
  This class defines our representation of checkpoint elements.  These
  elements represent events which force fast-import to close the current
  packfile, start a new one, and to save out all current branch refs, tags
  and marks.
  """

  def __init__(self):
    _GitElement.__init__(self)

    # Denote that this is a checkpoint element
    self.type = 'checkpoint'

  def dump(self, file_):
    """
    Write this checkpoint element to a file
    """
    self.dumped = 1

    file_.write('checkpoint\n')
    file_.write('\n')

class LiteralCommand(_GitElement):
  """
  This class defines our representation of commands. The literal command
  includes only a single line, and is not processed in any special way.
  """

  def __init__(self, line):
    _GitElement.__init__(self)

    # Denote that this is a literal element
    self.type = 'literal'

    # Store the command
    self.line = line

  def dump(self, file_):
    """
    Write this literal command to a file
    """
    self.dumped = 1

    file_.write(self.line)

class FastExportFilter(object):
  """
  A class for parsing and handling the output from fast-export. This
  class allows the user to register callbacks when various types of
  data are encountered in the fast-export output. The basic idea is that,
  FastExportFilter takes fast-export output, creates the various objects
  as it encounters them, the user gets to use/modify these objects via
  callbacks, and finally FastExportFilter outputs the modified objects
  in fast-import format (presumably so they can be used to create a new
  repo).
  """

  def __init__(self, repo_working_dir,
               tag_callback = None, commit_callback = None,
               blob_callback = None, progress_callback = None,
               reset_callback = None, checkpoint_callback = None,
               everything_callback = None):
    # Repo we are exporting
    self._repo_working_dir = repo_working_dir

    # Members below simply store callback functions for the various git
    # elements
    self._tag_callback = tag_callback
    self._blob_callback = blob_callback
    self._reset_callback = reset_callback
    self._commit_callback = commit_callback
    self._progress_callback = progress_callback
    self._checkpoint_callback = checkpoint_callback
    self._everything_callback = everything_callback

    # A list of all the refs we've seen, plus any mark we need to set them
    # to if the last (or even only) commit on that branch was pruned
    self._seen_refs = {}
    # A tuple of (depth, list-of-ancestors).  Commits and ancestors are
    # identified by their id (their 'mark' in fast-export or fast-import
    # speak).  The depth of a commit is one more than the max depth of any
    # of its ancestors.
    self._graph = AncestryGraph()

    # A set of commit hash pairs (oldhash, newhash) which used to be merge
    # commits but due to filtering were turned into non-merge commits.
    # The commits probably have suboptimal commit messages (e.g. "Merge branch
    # next into master").
    self._commits_no_longer_merges = []

    # A dict of original_ids to new_ids; filtering commits means getting
    # new commit hash (sha1sums), and we record the mapping both for
    # diagnostic purposes and so we can rewrite commit messages.  Note that
    # the new_id can be None rather than a commit hash if the original
    # commit became empty and was pruned or was otherwise dropped.
    self._commit_renames = {}

    # A set of original_ids for which we have not yet gotten the
    # new_ids; we use OrderedDict because we need to know the order of
    # insertion, but the values are always ignored (and set to None).
    # If there was an OrderedSet class, I'd use it instead.
    self._pending_renames = collections.OrderedDict()

    # A dict of commit_hash[0:7] -> set(commit_hashes with that prefix).
    #
    # It's common for commit messages to refer to commits by abbreviated
    # commit hashes, as short as 7 characters.  To facilitate translating
    # such short hashes, we have a mapping of prefixes to full old hashes.
    self._commit_short_old_hashes = collections.defaultdict(set)

    # A set of commit hash references appearing in commit messages which
    # mapped to a valid commit that was removed entirely in the filtering
    # process.  The commit message will continue to reference the
    # now-missing commit hash, since there was nothing to map it to.
    self._commits_referenced_but_removed = set()

    # A handle to the input source for the fast-export data
    self._input = None

    # A handle to the output file for the output we generate (we call dump
    # on many of the git elements we create).
    self._output = None

    # A pair of (input, output) pipes for communicating with fast import.
    self._fast_import_pipes = None

    # Stores the contents of the current line of input being parsed
    self._currentline = ''

    # Stores a translation of ids, useful when reading the output of a second
    # or third (or etc.) git fast-export output stream
    self._id_offset = 0

    # Progress handling (number of commits parsed, etc.)
    self._progress_writer = ProgressWriter()
    self._num_commits = 0
    self._quiet = False

    # Whether we've run our post-processing extra commands
    self._finalize_handled = False

    # Names of files that were tweaked in any commit; such paths could lead
    # to subsequent commits being empty
    self._files_tweaked = set()

    # Compile some regexes and cache those
    self._mark_re = re.compile(r'mark :(\d+)\n$')
    self._parent_regexes = {}
    parent_regex_rules = ('{} :(\d+)\n$', '{} ([0-9a-f]{{40}})\n')
    for parent_refname in ('from', 'merge'):
      ans = [re.compile(x.format(parent_refname)) for x in parent_regex_rules]
      self._parent_regexes[parent_refname] = ans
    self._quoted_string_re = re.compile(r'"(?:[^"\\]|\\.)*"')
    self._refline_regexes = {}
    for refline_name in ('reset', 'commit', 'tag', 'progress'):
      self._refline_regexes[refline_name] = re.compile(refline_name+' (.*)\n$')
    self._user_regexes = {}
    for user in ('author', 'committer', 'tagger'):
      self._user_regexes[user] = re.compile(user + ' (.*?) <(.*?)> (.*)\n$')
    self._hash_re = re.compile(r'(\b[0-9a-f]{7,40}\b)')
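
    # Lines these regexes are meant to match in the fast-export stream
    # (values illustrative):
    #   mark :42
    #   from :41
    #   merge 0123456789abcdef0123456789abcdef01234567
    #   commit refs/heads/master
    #   author Jane Doe <jane@example.com> 1234567890 +0500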

  def _advance_currentline(self):
    """
    Grab the next line of input
    """
    self._currentline = self._input.readline()

  def _parse_optional_mark(self):
    """
    If the current line contains a mark, parse it and advance to the
    next line; return None otherwise
    """
    mark = None
    matches = self._mark_re.match(self._currentline)
    if matches:
      mark = int(matches.group(1))+self._id_offset
      self._advance_currentline()
    return mark

  def _parse_optional_parent_ref(self, refname):
    """
    If the current line contains a reference to a parent commit, then
    parse it and advance the current line; otherwise return None for both
    return values. Note that the name of the reference ('from', 'merge')
    must match the refname arg.
    """
    orig_baseref, baseref = None, None
    rule, altrule = self._parent_regexes[refname]
    matches = rule.match(self._currentline)
    if matches:
      orig_baseref = int(matches.group(1)) + self._id_offset
      # We translate the parent commit mark to what it needs to be in
      # our mark namespace
      baseref = _IDS.translate(orig_baseref)
      self._advance_currentline()
    else:
      matches = altrule.match(self._currentline)
      if matches:
        orig_baseref = matches.group(1)
        baseref = orig_baseref
        self._advance_currentline()
    return orig_baseref, baseref
def _parse_optional_filechange(self):
|
|
|
|
"""
|
|
|
|
If the current line contains a file-change object, then parse it
|
|
|
|
and advance the current line; otherwise return None. We only care
|
|
|
|
about file changes of type 'M' and 'D' (these are the only types
|
|
|
|
of file-changes that fast-export will provide).
|
|
|
|
"""
|
|
|
|
filechange = None
|
|
|
|
changetype = self._currentline[0]
|
|
|
|
if changetype == 'M':
|
|
|
|
(changetype, mode, idnum, path) = self._currentline.split(None, 3)
|
|
|
|
if idnum[0] == ':':
|
|
|
|
idnum = idnum[1:]
|
|
|
|
path = path.rstrip('\n')
|
|
|
|
# We translate the idnum to our id system
|
|
|
|
if len(idnum) != 40:
|
|
|
|
idnum = _IDS.translate( int(idnum)+self._id_offset )
|
|
|
|
if idnum is not None:
|
|
|
|
if path.startswith('"'):
|
|
|
|
path = PathQuoting.dequote(path)
|
|
|
|
filechange = FileChanges('M', path, idnum, mode)
|
|
|
|
else:
|
|
|
|
filechange = 'skipped'
|
|
|
|
self._advance_currentline()
|
|
|
|
elif changetype == 'D':
|
|
|
|
(changetype, path) = self._currentline.split(None, 1)
|
|
|
|
path = path.rstrip('\n')
|
|
|
|
if path.startswith('"'):
|
|
|
|
path = PathQuoting.dequote(path)
|
|
|
|
filechange = FileChanges('D', path)
|
|
|
|
self._advance_currentline()
|
|
|
|
elif changetype == 'R':
      rest = self._currentline[2:-1]
      if rest.startswith('"'):
        m = self._quoted_string_re.match(rest)
        if not m:
          raise SystemExit("Couldn't parse rename source")
        orig = PathQuoting.dequote(m.group(0))
        new = rest[m.end()+1:]
      else:
        orig, new = rest.split(' ', 1)
      if new.startswith('"'):
        new = PathQuoting.dequote(new)
      filechange = FileChanges('R', orig, new)
      self._advance_currentline()
    return filechange

  def _parse_original_id(self):
    original_id = self._currentline[len('original-oid '):].rstrip()
    self._advance_currentline()
    return original_id

  def _parse_ref_line(self, refname):
    """
    Parses string data (often a branch name) from current-line. The name of
    the string data must match the refname arg. The program will crash if
    current-line does not match, so current-line will always be advanced if
    this method returns.
    """
    matches = self._refline_regexes[refname].match(self._currentline)
    if not matches:
      raise SystemExit("Malformed %s line: '%s'" %
                       (refname, self._currentline))
    ref = matches.group(1)
    self._advance_currentline()
    return ref

  def _parse_user(self, usertype):
    """
    Get user name, email, datestamp from current-line. Current-line will
    be advanced.
    """
    user_regex = self._user_regexes[usertype]
    (name, email, when) = user_regex.match(self._currentline).groups()

    # TimeZone idiocy; IST is any of four timezones, so someone translated
    # it to something that was totally invalid...and it got recorded that
    # way. Others have suggested just using an invalid timezone that
    # fast-import will not choke on. Let's do that. Note that +051800
    # seems to be the only weird timezone found in the wild, by me or some
    # other posts google returned on the subject...
    if when.endswith('+051800'):
      when = when[0:-7]+'+0261'

    self._advance_currentline()
    return (name, email, when)

  def _parse_data(self):
    """
    Reads data from _input. Current-line will be advanced until it is beyond
    the data.
    """
    fields = self._currentline.split()
    assert fields[0] == 'data'
    size = int(fields[1])
    data = self._input.read(size)
    self._advance_currentline()
    if self._currentline == '\n':
      self._advance_currentline()
    return data
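
    # Illustrative stream fragment this consumes: a "data <count>" header
    # followed by exactly <count> bytes of payload ('hello\n' is 6 bytes):
    #   data 6
    #   hello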

  def _parse_blob(self):
    """
    Parse input data into a Blob object. Once the Blob has been created, it
    will be handed off to the appropriate callbacks. Current-line will be
    advanced until it is beyond this blob's data. The Blob will be dumped
    to _output once everything else is done (unless it has been skipped by
    the callback).
    """
    # Parse the Blob
    self._advance_currentline()
    id_ = self._parse_optional_mark()

    original_id = None
    if self._currentline.startswith('original-oid'):
      original_id = self._parse_original_id()

    data = self._parse_data()
    if self._currentline == '\n':
      self._advance_currentline()

    # Create the blob
    blob = Blob(data, original_id)

    # If fast-export text had a mark for this blob, need to make sure this
    # mark translates to the blob's true id.
    if id_:
      blob.old_id = id_
      _IDS.record_rename(id_, blob.id)

    # Call any user callback to allow them to use/modify the blob
    if self._blob_callback:
      self._blob_callback(blob)
    if self._everything_callback:
      self._everything_callback('blob', blob)

    # Now print the resulting blob
    if not blob.dumped:
      blob.dump(self._output)

  def _parse_reset(self):
    """
    Parse input data into a Reset object. Once the Reset has been created,
    it will be handed off to the appropriate callbacks. Current-line will
    be advanced until it is beyond the reset data. The Reset will be dumped
    to _output once everything else is done (unless it has been skipped by
    the callback).
    """
    # Parse the Reset
    ref = self._parse_ref_line('reset')
ignoreme, from_ref = self._parse_optional_parent_ref('from')
    if self._currentline == '\n':
      self._advance_currentline()

    # fast-export likes to print extraneous resets that serve no purpose.
    # While we could continue processing such resets, that is a waste of
    # resources. Also, we want to avoid recording that this ref was
    # seen in such cases, since this ref could be rewritten to nothing.
    if not from_ref:
      return

    # Create the reset
    reset = Reset(ref, from_ref)

    # Call any user callback to allow them to modify the reset
    if self._reset_callback:
      self._reset_callback(reset)
    if self._everything_callback:
      self._everything_callback('reset', reset)

    # Now print the resulting reset
    self._seen_refs[reset.ref] = None
    if not reset.dumped:
      reset.dump(self._output)

  def _get_rename(self, old_hash):
    # If we already know the rename, just return it
    new_hash = self._commit_renames.get(old_hash, None)
    if new_hash:
      return new_hash

    # If it's not in the remaining pending renames, we don't know it
    if old_hash is not None and old_hash not in self._pending_renames:
      return None

    # Read through the pending renames until we find it or we've read them
    # all, and return whatever we might find
    self._flush_renames(old_hash)
    return self._commit_renames.get(old_hash, None)

  def _flush_renames(self, old_hash=None, limit=0):
    # Parse through self._pending_renames until we have read enough. We have
    # read enough if:
    #   self._pending_renames is empty
    #   old_hash != None and we found a rename for old_hash
    #   limit > 0 and len(self._pending_renames) started less than 2*limit
    #   limit > 0 and len(self._pending_renames) < limit
    if limit and len(self._pending_renames) < 2 * limit:
      return
    fi_input, fi_output = self._fast_import_pipes
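    # Each entry in _pending_renames corresponds to a get-mark request that
    # record_remapping() already sent to fast-import; fast-import answers
    # with one 40-hex-char hash per line, in request order, which is why a
    # plain readline() below pairs up with popitem(last=False).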
    while self._pending_renames:
      orig_id, ignore = self._pending_renames.popitem(last=False)
      new_id = fi_output.readline().rstrip()
      self._commit_renames[orig_id] = new_id
      if old_hash == orig_id:
        return
      if limit and len(self._pending_renames) < limit:
        return

  def _translate_commit_hash(self, matchobj):
    old_hash = matchobj.group(1)
    orig_len = len(old_hash)
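    # For illustration: if a commit message said "Revert deadbeef12345",
    # old_hash is 'deadbeef12345' and orig_len is 13; whatever hash we map
    # it to below gets abbreviated back to 13 characters.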
    new_hash = self._get_rename(old_hash)
    if new_hash is None:
      if old_hash[0:7] not in self._commit_short_old_hashes:
        return old_hash
      possibilities = self._commit_short_old_hashes[old_hash[0:7]]
      matches = [x for x in possibilities
                 if x[0:orig_len] == old_hash]
      if len(matches) != 1:
        return old_hash
      old_hash = matches[0]
      new_hash = self._get_rename(old_hash)

    if new_hash is None:
      self._commits_referenced_but_removed.add(old_hash)
      return old_hash[0:orig_len]
    else:
      return new_hash[0:orig_len]

  def trim_extra_parents(self, orig_parents, parents):
    '''Due to pruning of empty commits, some parents could be non-existent
    (None) or otherwise redundant. Remove the non-existent parents, and
    remove redundant parents so long as that doesn't transform a merge
    commit into a non-merge commit.

    Returns a tuple:
      (parents, new_first_parent_if_would_become_non_merge)'''
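    # Worked example (hypothetical marks): orig_parents == [2, 3] where both
    # sides were pruned back to the same ancestor, so parents == [1, 1].
    # Deduplicating would leave [1] and turn the merge into a non-merge, so
    # we return ([1, 1], 1) and let prunable() decide the commit's fate.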

    # Pruning of empty commits means multiple things:
    #   * An original parent of this commit may have been pruned causing the
    #     need to rewrite the reported parent to the nearest ancestor. We
    #     want to know when we're dealing with such a parent.
    #   * Further, there may be no "nearest ancestor" if the entire history
    #     of that parent was also pruned. (Detectable by the parent being
    #     'None')
    # Remove all parents rewritten to None, and keep track of which parents
    # were rewritten to an ancestor.
    tmp = zip(parents, [x in _SKIPPED_COMMITS for x in orig_parents])
    tmp2 = [x for x in tmp if x[0] is not None]
    parents, is_rewritten = [list(x) for x in zip(*tmp2)] if tmp2 else ([], [])

    # However, the way fast-export/fast-import split parents into from_commit
    # and merge_commits means we'd rather a parentless commit be represented
    # as a list containing a single None entry.
    if not parents:
      parents.append(None)

    # We can't have redundant parents if we don't have at least 2 parents
    if len(parents) < 2:
      return parents, None

    # Remove duplicate parents (if both sides of history have lots of commits
    # which become empty due to pruning, the most recent ancestor on both
    # sides may be the same commit), except only remove parents that have
    # been rewritten due to previous empty pruning.
    seen = set()
    seen_add = seen.add
    # Deleting duplicate rewritten parents means keeping parents if either
    # they have not been seen or they are ones that have not been rewritten.
    parents_copy = parents
    pairs = [[p, is_rewritten[i]] for i, p in enumerate(parents)
             if not (p in seen or seen_add(p)) or not is_rewritten[i]]
    parents, is_rewritten = [list(x) for x in zip(*pairs)]
    if len(parents) < 2:
      return parents_copy, parents[0]

    # Flatten unnecessary merges. (If one side of history is entirely
    # empty commits that were pruned, we may end up attempting to
    # merge a commit with its ancestor. Remove parents that are an
    # ancestor of another parent.)
    num_parents = len(parents)
    to_remove = []
    for cur in xrange(num_parents):
      if not is_rewritten[cur]:
        continue
      for other in xrange(num_parents):
        if cur != other and self._graph.is_ancestor(parents[cur],
                                                    parents[other]):
          to_remove.append(cur)
          break # cur removed, so skip rest of others -- i.e. check cur+=1
    for x in reversed(to_remove):
      parents.pop(x)
    if len(parents) < 2:
      return parents_copy, parents[0]

    return parents, None

  def prunable(self, commit, new_1st_parent, had_file_changes, orig_parents):
    parents = [commit.from_commit] + commit.merge_commits
    if not commit.from_commit:
      parents = []

    # For merge commits, unless there are prunable (redundant) parents, we
    # do not want to prune
    if len(parents) >= 2 and not new_1st_parent:
      return False

    if len(parents) < 2:
      # Special logic for commits that started empty...
      if not had_file_changes:
        # If the commit remains empty and had parents pruned, then prune
        # this commit; otherwise, retain it
        return (not commit.file_changes and
                len(parents) < len(orig_parents))

      # We can only get here if the commit didn't start empty, so if it's
      # empty now, it obviously became empty
      if not commit.file_changes:
        return True

    # If there are no parents of this commit and we didn't match the case
    # above, then this commit cannot be pruned. Since we have no parent(s)
    # to compare to, abort now to prevent future checks from failing.
    if not parents:
      return False

    # Similarly, we cannot handle the hard cases if we don't have a pipe
    # to communicate with fast-import
    if not self._fast_import_pipes:
      return False

    # non-merge commits can only be empty if blob/file-change editing caused
    # all file changes in the commit to have the same file contents as
    # the parent.
    changed_files = set(change.filename for change in commit.file_changes)
    if len(orig_parents) < 2 and changed_files - self._files_tweaked:
      return False

    # Finally, the hard case: due to either blob rewriting, or due to pruning
    # of empty commits wiping out the first parent history back to the merge
    # base, the list of file_changes we have may not actually differ from our
    # (new) first parent's version of the files, i.e. this would actually be
    # an empty commit. Check by comparing the contents of this commit to its
    # (remaining) parent.
    #
    # NOTE on why this works, for the case of original first parent history
    # having been pruned away due to being empty:
    #   The first parent history having been pruned away due to being
    #   empty implies the original first parent would have a tree (after
    #   filtering) that matched the merge base's tree. Since
    #   file_changes has the changes needed to go from what would have
    #   been the first parent to our new commit, and what would have been
    #   our first parent has a tree that matches the merge base, then if
    #   the new first parent has a tree matching the versions of files in
    #   file_changes, then this new commit is empty and thus prunable.
    fi_input, fi_output = self._fast_import_pipes
    self._flush_renames()  # Avoid fi_output having other stuff present
    # Optimization note: we could have two loops over file_changes, the
    # first doing all the fi_input.write() calls, and the second doing the
    # rest. But I'm worried about fast-import blocking on fi_output
    # buffers filling up, so I instead read from it as I go.
    for change in commit.file_changes:
      parent = new_1st_parent or commit.from_commit
      assert parent  # Should be good based on the checks above
      fi_input.write("ls :{} {}\n".format(parent, change.filename))
      fi_input.flush()
      parent_version = fi_output.readline().split()
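      # fast-import's response to `ls` is either
      #   <mode> SP <type> SP <dataref> HT <path>
      # (e.g. "100644 blob <sha1> README", illustrative) or
      # "missing <path>", which is what the comparisons below check against.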
      if change.type == 'D':
        if parent_version != ['missing', change.filename]:
          return False
      else:
        blob_sha = change.blob_id
        if isinstance(change.blob_id, int):
          fi_input.write("get-mark :{}\n".format(change.blob_id))
          fi_input.flush()
          blob_sha = fi_output.readline().rstrip()
        if parent_version != [change.mode, 'blob', blob_sha, change.filename]:
          return False

    return True

  def record_remapping(self, commit, orig_parents):
    new_id = None
    # Record the mapping of old commit hash to new one
    if commit.original_id and self._fast_import_pipes:
      fi_input, fi_output = self._fast_import_pipes
      fi_input.write("get-mark :{}\n".format(commit.id))
      fi_input.flush()
      orig_id = commit.original_id
      self._commit_short_old_hashes[orig_id[0:7]].add(orig_id)
      # Note that we have queued up an id for later reading; flush a
      # few of the older ones if we have too many queued up
      self._pending_renames[orig_id] = None
      self._flush_renames(None, limit=40)
    # Also, record if this was a merge commit that turned into a non-merge
    # commit.
    if len(orig_parents) >= 2 and not commit.merge_commits:
      self._commits_no_longer_merges.append((commit.original_id, new_id))
def num_commits_parsed(self):
    return self._num_commits

  def _parse_commit(self):
    """
    Parse input data into a Commit object. Once the Commit has been created,
    it will be handed off to the appropriate callbacks. Current-line will
    be advanced until it is beyond the commit data. The Commit will be dumped
    to _output once everything else is done (unless it has been skipped by
    the callback OR the callback has removed all file-changes from the commit).
    """
    # Parse the Commit. This may look involved, but it's pretty simple; it only
    # looks bad because a commit object contains many pieces of data.
    branch = self._parse_ref_line('commit')
    id_ = self._parse_optional_mark()

    original_id = None
    if self._currentline.startswith('original-oid'):
      original_id = self._parse_original_id()

    author_name = None
    if self._currentline.startswith('author'):
      (author_name, author_email, author_date) = self._parse_user('author')

    (committer_name, committer_email, committer_date) = \
      self._parse_user('committer')

    if not author_name:
      (author_name, author_email, author_date) = \
        (committer_name, committer_email, committer_date)

    commit_msg = self._parse_data()
    commit_msg = self._hash_re.sub(self._translate_commit_hash, commit_msg)
pinfo = [self._parse_optional_parent_ref('from')]
    # Due to empty pruning, we can have real 'from' and 'merge' lines that
    # due to commit rewriting map to a parent of None. We need to record
    # 'from' if it's non-None, and we need to parse all 'merge' lines.
    while self._currentline.startswith('merge '):
pinfo.append(self._parse_optional_parent_ref('merge'))
orig_parents, parents = [list(tmp) for tmp in zip(*pinfo)]
# Prune parents (due to pruning of empty commits) if relevant
    parents, new_1st_parent = self.trim_extra_parents(orig_parents, parents)
    from_commit = parents[0]
    merge_commits = parents[1:]

    # Get the list of file changes
    file_changes = []
    file_change = self._parse_optional_filechange()
    had_file_changes = file_change is not None
    while file_change:
      if not (isinstance(file_change, str) and file_change == 'skipped'):
        file_changes.append(file_change)
      file_change = self._parse_optional_filechange()
    if self._currentline == '\n':
      self._advance_currentline()

    # Okay, now we can finally create the Commit object
    commit = Commit(branch,
                    author_name, author_email, author_date,
                    committer_name, committer_email, committer_date,
                    commit_msg,
                    file_changes,
                    from_commit,
                    merge_commits,
                    original_id,
                    stream_number=_CURRENT_STREAM_NUMBER)

    # If fast-export text had a mark for this commit, need to make sure this
    # mark translates to the commit's true id.
    if id_:
      commit.old_id = id_
      _IDS.record_rename(id_, commit.id)
# Record ancestry graph
    self._graph.add_commit_and_parents(commit.id, commit.get_parents())

    # Record the original list of file changes relative to first parent
    orig_file_changes = set(commit.file_changes)

    # Call any user callback to allow them to modify the commit
    if self._commit_callback:
      self._commit_callback(commit)
    if self._everything_callback:
      self._everything_callback('commit', commit)

    # Sanity check that user callbacks didn't violate assumption on parents
    if commit.merge_commits:
      assert commit.from_commit is not None

    # Find out which files were modified by the callbacks. Such paths could
    # lead to subsequent commits being empty (e.g. if we removed a line
    # containing a password from every version of a file that had the
    # password, and some later commit did nothing more than remove that line)
    final_file_changes = set(commit.file_changes)
    differences = orig_file_changes.symmetric_difference(final_file_changes)
    self._files_tweaked.update(x.filename for x in differences)

    # Now print the resulting commit, or if prunable skip it
    if not commit.dumped:
      if not self.prunable(commit, new_1st_parent, had_file_changes,
                           orig_parents):
        self._seen_refs[commit.branch] = None  # was seen, doesn't need reset
        commit.dump(self._output)
        self.record_remapping(commit, orig_parents)
      else:
        rewrite_to = new_1st_parent or commit.first_parent()
        # We skip empty commits, but want to keep track to make sure our
        # branch still gets set and/or updated appropriately.
        if rewrite_to:
          self._seen_refs[commit.branch] = rewrite_to  # need reset
        commit.skip(new_id=rewrite_to)
        self._commit_renames[commit.original_id] = None

    # Show progress
    self._num_commits += 1
    if not self._quiet:
      self._progress_writer.show("Parsed {} commits".format(self._num_commits))

  def _parse_tag(self):
    """
    Parse input data into a Tag object. Once the Tag has been created,
    it will be handed off to the appropriate callbacks. Current-line will
    be advanced until it is beyond the tag data. The Tag will be dumped
    to _output once everything else is done (unless it has been skipped by
    the callback).
    """
    # Parse the Tag
    tag = self._parse_ref_line('tag')
ignoreme, from_ref = self._parse_optional_parent_ref('from')

    original_id = None
    if self._currentline.startswith('original-oid'):
      original_id = self._parse_original_id()

    tagger_name, tagger_email, tagger_date = None, None, None
    if self._currentline.startswith('tagger'):
      (tagger_name, tagger_email, tagger_date) = self._parse_user('tagger')
    tag_msg = self._parse_data()
    if self._currentline == '\n':
      self._advance_currentline()

    # Create the tag
    tag = Tag(tag, from_ref,
              tagger_name, tagger_email, tagger_date, tag_msg,
              original_id)

    # Call any user callback to allow them to modify the tag
    if self._tag_callback:
      self._tag_callback(tag)
    if self._everything_callback:
      self._everything_callback('tag', tag)

    # The tag might not point at anything that still exists (tag.from_ref
    # will be None if the commit it pointed to and all its ancestors were
    # pruned due to being empty)
    if not tag.from_ref:
      # If everything in the history of this tag was pruned, we need to
      # delete the fact that it was seen so that refs_to_nuke will include
      # it and wipe out the original version of that tag.
      full_ref = 'refs/tags/{}'.format(tag.ref)
      if full_ref in self._seen_refs:
        del self._seen_refs[full_ref]
    elif not tag.dumped:
      tag.dump(self._output)

  def _parse_progress(self):
    """
    Parse input data into a Progress object. Once the Progress has
    been created, it will be handed off to the appropriate
    callbacks. Current-line will be advanced until it is beyond the
    progress data. The Progress will be dumped to _output once
    everything else is done (unless it has been skipped by the callback).
    """
    # Parse the Progress
    message = self._parse_ref_line('progress')
    if self._currentline == '\n':
      self._advance_currentline()

    # Create the progress message
    progress = Progress(message)

    # Call any user callback to allow them to modify the progress message
    if self._progress_callback:
      self._progress_callback(progress)
    if self._everything_callback:
      self._everything_callback('progress', progress)

    # Now print the resulting progress message
    if not progress.dumped:
      progress.dump(self._output)

  def _parse_checkpoint(self):
    """
    Parse input data into a Checkpoint object. Once the Checkpoint has
    been created, it will be handed off to the appropriate
    callbacks. Current-line will be advanced until it is beyond the
    checkpoint data. The Checkpoint will be dumped to _output once
    everything else is done (unless it has been skipped by the callback).
    """
    # Parse the Checkpoint
    self._advance_currentline()
    if self._currentline == '\n':
      self._advance_currentline()

    # Create the checkpoint
    checkpoint = Checkpoint()

    # Call any user callback to allow them to drop the checkpoint
    if self._checkpoint_callback:
      self._checkpoint_callback(checkpoint)
    if self._everything_callback:
      self._everything_callback('checkpoint', checkpoint)

    # Now print the resulting checkpoint
    if not checkpoint.dumped:
      checkpoint.dump(self._output)

  def _parse_literal_command(self):
    """
    Parse literal command. Just dump the line as is.
    """
    # Create the literal command object
    command = LiteralCommand(self._currentline)
    self._advance_currentline()

    # Now print the resulting literal command
    if not command.dumped:
      command.dump(self._output)

  def _handle_final_commands(self):
    self._finalize_handled = True
    for ref, value in self._seen_refs.iteritems():
      if value is not None:
        # Create a reset
        reset = Reset(ref, value)

        # Call any user callback to allow them to modify the reset
        if self._reset_callback:
          self._reset_callback(reset)
        if self._everything_callback:
          self._everything_callback('reset', reset)

        # Now print the resulting reset
        reset.dump(self._output)

  def record_metadata(self, metadata_dir, orig_refs, refs_nuked):
    deleted_hash = '0'*40
    self._flush_renames()
    with open(os.path.join(metadata_dir, 'commit-map'), 'w') as f:
      f.write("old new\n")
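      # Each following line maps an original hash to its rewritten hash; a
      # pruned commit maps to forty zeros (deleted_hash above), so a line
      # might read "<old sha1> 0000000000000000000000000000000000000000".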
      for (old, new) in self._commit_renames.iteritems():
        f.write('{} {}\n'.format(old, new if new is not None else deleted_hash))

    batch_check_process = None
    batch_check_output_re = re.compile('^([0-9a-f]{40}) ([a-z]+) ([0-9]+)$')
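    # `git cat-file --batch-check` answers each ref name with a line of the
    # form "<sha1> <type> <size>", which is what this regex (and the
    # m.group(2) != 'tag' test below) picks apart.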
    with open(os.path.join(metadata_dir, 'ref-map'), 'w') as f:
      for refname, old_hash in orig_refs.iteritems():
        if refname in refs_nuked:
          new_hash = deleted_hash
        elif old_hash in self._commit_renames:
          new_hash = self._commit_renames[old_hash]
          new_hash = new_hash if new_hash is not None else deleted_hash
        else:  # Must be an annotated tag
          if not batch_check_process:
            cmd = 'git cat-file --batch-check'.split()
            batch_check_process = subprocess.Popen(cmd,
                                                   stdin=subprocess.PIPE,
                                                   stdout=subprocess.PIPE,
                                                   cwd=self._repo_working_dir)
          batch_check_process.stdin.write(refname+"\n")
          line = batch_check_process.stdout.readline()
          m = batch_check_output_re.match(line)
          if not m or m.group(2) != 'tag':
            raise SystemExit("Failed to find new id for {} (old id was {})"
                             .format(refname, old_hash))
          new_hash = m.group(1)
        f.write('{} {} {}\n'.format(old_hash, new_hash, refname))
    if batch_check_process:
      batch_check_process.stdin.close()
      batch_check_process.wait()

    with open(os.path.join(metadata_dir, 'suboptimal-issues'), 'w') as f:
      issues_found = False
      if self._commits_no_longer_merges:
        issues_found = True

        f.write(textwrap.dedent('''
          The following commits used to be merge commits but due to filtering
          are now regular commits; they likely have suboptimal commit messages
          (e.g. "Merge branch next into master"). Original commit hash on the
          left, commit hash after filtering/rewriting on the right:
          '''[1:]))
        for oldhash, newhash in self._commits_no_longer_merges:
          f.write(' {} {}\n'.format(oldhash, newhash))
        f.write('\n')

      if self._commits_referenced_but_removed:
        issues_found = True
        f.write(textwrap.dedent('''
          The following commits were filtered out, but referenced in another
          commit message. The reference to the now-nonexistent commit hash
          (or a substring thereof) was left as-is in any commit messages:
          '''[1:]))
        for bad_commit_reference in self._commits_referenced_but_removed:
          f.write(' {}\n'.format(bad_commit_reference))
        f.write('\n')

      if not issues_found:
        f.write("No filtering problems encountered.")

  def get_seen_refs(self):
    return self._seen_refs.keys()

  def run(self, input, output, fast_import_pipes, quiet):
    """
    This method filters fast export output.
    """
    # Set input. If no args provided, use stdin.
    self._input = input
    self._output = output
    self._fast_import_pipes = fast_import_pipes
    self._quiet = quiet

    # Setup some vars
    global _CURRENT_STREAM_NUMBER

    _CURRENT_STREAM_NUMBER += 1
    if _CURRENT_STREAM_NUMBER > 1:
      self._id_offset = _IDS._next_id-1

    # Run over the input and do the filtering
    self._advance_currentline()
    while self._currentline:
      if self._currentline.startswith('blob'):
        self._parse_blob()
      elif self._currentline.startswith('reset'):
        self._parse_reset()
      elif self._currentline.startswith('commit'):
        self._parse_commit()
      elif self._currentline.startswith('tag'):
        self._parse_tag()
      elif self._currentline.startswith('progress'):
        self._parse_progress()
      elif self._currentline.startswith('checkpoint'):
        self._parse_checkpoint()
      elif self._currentline.startswith('feature'):
        self._parse_literal_command()
      elif self._currentline.startswith('option'):
        self._parse_literal_command()
      elif self._currentline.startswith('done'):
        self._handle_final_commands()
        self._parse_literal_command()
      elif self._currentline.startswith('#'):
        self._parse_literal_command()
      elif self._currentline.startswith('get-mark') or \
           self._currentline.startswith('cat-blob') or \
           self._currentline.startswith('ls'):
        raise SystemExit("Unsupported command: '%s'" % self._currentline)
      else:
        raise SystemExit("Could not parse line: '%s'" % self._currentline)

    if not self._quiet:
      self._progress_writer.finish()
    if not self._finalize_handled:
      self._handle_final_commands()

def record_id_rename(old_id, new_id):
  """
  Register a new translation
  """
  handle_transitivity = True
  _IDS.record_rename(old_id, new_id, handle_transitivity)

# Internal globals
_IDS = _IDs()
_EXTRA_CHANGES = {}  # idnum -> list of list of FileChanges
_SKIPPED_COMMITS = set()
_CURRENT_STREAM_NUMBER = 0

class GitUtils(object):
  @staticmethod
  def get_commit_count(repo, *args):
    """
    Return the number of commits that have been made on repo.
    """
    if not args:
      args = ['--all']
    if len(args) == 1 and isinstance(args[0], list):
      args = args[0]
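    # Accepts refs either as varargs or as a single list, so (illustrative
    # ref names) get_commit_count(repo, 'master', 'next') and
    # get_commit_count(repo, ['master', 'next']) behave identically.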
    p1 = subprocess.Popen(["git", "rev-list"] + args,
                          bufsize=-1,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          cwd=repo)
    p2 = subprocess.Popen(["wc", "-l"], stdin=p1.stdout, stdout=subprocess.PIPE)
    count = int(p2.communicate()[0])
    if p1.poll() != 0:
      raise SystemExit("%s does not appear to be a valid git repository" % repo)
    return count

  @staticmethod
  def get_total_objects(repo):
    """
    Return the number of objects (both packed and unpacked)
    """
    p1 = subprocess.Popen(["git", "count-objects", "-v"],
                          stdout=subprocess.PIPE, cwd=repo)
    lines = p1.stdout.read().splitlines()
    # Return unpacked objects + packed-objects
    return int(lines[0].split()[1]) + int(lines[2].split()[1])

  @staticmethod
  def is_repository_bare(repo_working_dir):
    out = subprocess.check_output('git rev-parse --is-bare-repository'.split(),
                                  cwd=repo_working_dir)
    return (out.strip() == 'true')

  @staticmethod
  def determine_git_dir(repo_working_dir):
    d = subprocess.check_output('git rev-parse --git-dir'.split(),
                                cwd=repo_working_dir).strip()
    if repo_working_dir == '.' or d.startswith('/'):
      return d
    return os.path.join(repo_working_dir, d)

  @staticmethod
  def get_refs(repo_working_dir):
    try:
      output = subprocess.check_output('git show-ref'.split(),
                                       cwd=repo_working_dir)
    except subprocess.CalledProcessError as e:
      # If error code is 1, there just aren't any refs; i.e. new repo.
      # If error code is other than 1, some other error (e.g. not a git repo)
      if e.returncode != 1:
        raise SystemExit('fatal: {}'.format(e))
      output = ''
    return dict(reversed(x.split()) for x in output.splitlines())

class FilteringOptions(object):
  class AppendFilter(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      suffix = option_string[len('--path-'):] or 'match'
      if suffix == 'rename':
        mod_type = 'rename'
        match_type = 'prefix'
      elif suffix.startswith('rename-'):
        mod_type = 'rename'
        match_type = suffix[len('rename-'):]
      else:
        mod_type = 'filter'
        match_type = suffix
      if match_type == 'regex':
        values = re.compile(values)
      items = getattr(namespace, self.dest, []) or []
      items.append((mod_type, match_type, values))
      setattr(namespace, self.dest, items)
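      # For illustration: `--path-glob 'src/*.c'` is recorded as
      # ('filter', 'glob', 'src/*.c'), while `--path-rename old/:new/`
      # becomes ('rename', 'prefix', 'old/:new/').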

  class HelperFilter(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      af = FilteringOptions.AppendFilter(dest='path_changes',
                                         option_strings=None)
      dirname = values if values[-1] == '/' else values+'/'
      if option_string == '--subdirectory-filter':
        af(parser, namespace, dirname, '--path-match')
        af(parser, namespace, dirname+':', '--path-rename')
      elif option_string == '--to-subdirectory-filter':
        af(parser, namespace, ':'+dirname, '--path-rename')
      else:
        raise SystemExit("Error: HelperFilter given invalid option_string: {}"
                         .format(option_string))

  @staticmethod
  def create_arg_parser():
    # Include usage in the summary, so we can put the description first
    summary = '''Rewrite (or analyze) repository history

    git-filter-repo destructively rewrites history (unless --analyze or
    --dry-run are specified) according to specified rules. It refuses to do
    any rewriting unless either run from a clean fresh clone, or --force
    was specified.

    Basic Usage:
      git-filter-repo --analyze
      git-filter-repo [FILTER/RENAME/CONTROL OPTIONS]

    See EXAMPLES section for details.
    '''.rstrip()

    # Provide a long helpful examples section
    example_text = '''EXAMPLES

    To get help:
      git-filter-repo --help
    '''

    # Create the basic parser
    parser = argparse.ArgumentParser(description=summary,
                                     usage=argparse.SUPPRESS,
                                     add_help=False,
                                     epilog=example_text,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    analyze = parser.add_argument_group(title='Analysis')
    analyze.add_argument('--analyze', action='store_true',
                         help='''Analyze repository history and create a
                              report that may be useful in determining
                              what to filter in a subsequent run. Will
                              not modify your repo.''')

    refs = parser.add_argument_group(title='Git References')
    refs.add_argument('--refs', action='store_const', const=['--all'],
                      default=['--all'], help=argparse.SUPPRESS)

    path = parser.add_argument_group(title='Filtering based on paths')
    path.add_argument('--invert-paths', action='store_false',
                      dest='inclusive',
                      help='''Invert the selection of files from the specified
                           --path-{match,glob,regex} options below, i.e. only
                           select files matching none of those options.''')

    path.add_argument('--path-match', '--path', metavar='DIR_OR_FILE',
                      action=FilteringOptions.AppendFilter, dest='path_changes',
                      help='''Exact paths (files or directories) to include in
                           filtered history. Multiple --path options can be
                           specified to get a union of paths.''')
    path.add_argument('--path-glob', metavar='GLOB',
                      action=FilteringOptions.AppendFilter, dest='path_changes',
                      help='''Glob of paths to include in filtered history.
                           Multiple --path-glob options can be specified to
                           get a union of paths.''')
    path.add_argument('--path-regex', metavar='REGEX',
                      action=FilteringOptions.AppendFilter, dest='path_changes',
                      help='''Regex of paths to include in filtered history.
                           Multiple --path-regex options can be specified to
                           get a union of paths.''')

    rename = parser.add_argument_group(title='Renaming based on paths')
    rename.add_argument('--path-rename', '--path-rename-prefix',
                        metavar='OLD_NAME:NEW_NAME',
                        action=FilteringOptions.AppendFilter,
                        dest='path_changes',
                        help='''Prefix to rename; if filename starts with
                             OLD_NAME, replace that with NEW_NAME. Multiple
                             --path-rename options can be specified.''')

    refrename = parser.add_argument_group(title='Renaming of refs')
    refrename.add_argument('--tag-rename', metavar='OLD:NEW',
                           help='''Rename tags starting with OLD to start with
                                NEW. e.g. --tag-rename foo:bar will rename
                                tag foo-1.2.3 to bar-1.2.3; either OLD or NEW
                                can be empty.''')

    helpers = parser.add_argument_group(title='Shortcuts')
    helpers.add_argument('--subdirectory-filter', metavar='DIRECTORY',
                         action=FilteringOptions.HelperFilter,
                         help='''Only look at history that touches the given
                              subdirectory and treat that directory as the
                              project root. Equivalent to using
                              "--path DIRECTORY/ --path-rename DIRECTORY/:"
                              ''')
    helpers.add_argument('--to-subdirectory-filter', metavar='DIRECTORY',
                         action=FilteringOptions.HelperFilter,
                         help='''Treat the project root as instead being under
                              DIRECTORY. Equivalent to using
                              "--path-rename :DIRECTORY/"''')

    people = parser.add_argument_group(title='Filtering of names/emails')
    people.add_argument('--mailmap', dest='mailmap', metavar='FILENAME',
                        help='''Use specified mailmap file (see git-shortlog(1)
                             for details on the format) when rewriting
                             author, committer, and tagger names and
                             emails. If the specified file is part of git
                             history, historical versions of the file will
                             be ignored; only the current contents are
                             consulted.''')
    people.add_argument('--use-mailmap', dest='mailmap',
                        action='store_const', const='.mailmap',
                        help='''Same as: '--mailmap .mailmap' ''')
contents = parser.add_argument_group(title='Content editing filters')
    contents.add_argument('--replace-text', metavar='EXPRESSIONS_FILE',
                          help='''A file with expressions that, if found, will
                               be replaced. By default, each expression is
                               treated as literal text, but 'regex:' and
                               'glob:' prefixes are supported. You can end
                               the line with "==>" and some replacement text
                               to choose a replacement choice other than the
                               default of "***REMOVED***".''')

    location = parser.add_argument_group(title='Location to filter from/to')
    location.add_argument('--source',
                          help='''Git repository to read from''')
    location.add_argument('--target',
                          help='''Git repository to overwrite with filtered
                               history''')

    misc = parser.add_argument_group(title='Miscellaneous options')
    misc.add_argument('--help', '-h', action='store_true',
                      help='''Show this help message and exit.''')
    misc.add_argument('--force', '-f', action='store_true',
                      help='''Rewrite history even if the current repo does not
                           look like a fresh clone.''')

    misc.add_argument('--dry-run', action='store_true',
                      help='''Do not change the repository. Run `git
                           fast-export` and filter its output, and save both
                           the original and the filtered version for
                           comparison. Some filtering of empty commits may
                           not occur due to inability to query the
                           fast-import backend.''')
    misc.add_argument('--debug', action='store_true',
                      help='''Print additional information about operations
                           being performed and commands being run. When used
                           together with --dry-run, also show extra
                           information about what would be run.''')
    misc.add_argument('--stdin', action='store_true',
                      help='''Instead of running `git fast-export` and
                           filtering its output, filter the fast-export
                           stream from stdin.''')
    misc.add_argument('--quiet', action='store_true',
                      help='''Pass --quiet to other git commands called''')
    return parser

  @staticmethod
  def sanity_check_args(args):
    if args.analyze and args.path_changes:
      raise SystemExit("Error: --analyze is incompatible with --path* flags; "
                       "it's a read-only operation.")
    if args.analyze and args.stdin:
      raise SystemExit("Error: --analyze is incompatible with --stdin.")
    # If no path_changes are found, initialize with empty list but mark as
    # not inclusive so that all files match
    if args.path_changes is None:
      args.path_changes = []
      args.inclusive = False
    # Similarly, if we only have renames, all paths should match
    else:
      has_filter = False
      for (mod_type, match_type, path_expression) in args.path_changes:
        if mod_type == 'filter':
          has_filter = True
      if not has_filter:
        args.inclusive = False
    # Also throw in a sanity check on git version here;
    # PERF: remove this check once new enough git versions are common
    p = subprocess.Popen('git diff-tree -h'.split(),
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    p.wait()
    output = p.stdout.read()
    if '--combined-all-paths' not in output:
      raise SystemExit("Error: need a version of git whose diff-tree command "
                       "has the --combined-all-paths option")
@staticmethod
  def get_replace_text(filename):
    replace_literals = []
    replace_regexes = []
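    # The expressions file follows BFG repo cleaner syntax: one entry per
    # line of the form
    #   [regex:|glob:|literal:]MATCH_EXPR[==>REPLACEMENT_EXPR]
    # For example (illustrative entries):
    #   sup3rs3kr3t                        (becomes '***REMOVED***')
    #   HeWhoShallNotBeNamed==>Voldemort
    #   glob:Copy*t==>Cartel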
    with open(filename) as f:
      for line in f:
        line = line.rstrip('\r\n')

        # Determine the replacement
        replacement = '***REMOVED***'
        if '==>' in line:
          line, replacement = line.rsplit('==>', 1)

        # See if we need to match via regex
        regex = None
        if line.startswith('regex:'):
          regex = line[6:]
        elif line.startswith('glob:'):
          regex = fnmatch.translate(line[5:])
          if regex.endswith(r'\Z(?ms)'):
            regex = regex[0:-7]
        if regex:
          replace_regexes.append((re.compile(regex), replacement))
        else:
          # Otherwise, find the literal we need to replace
          if line.startswith('literal:'):
            line = line[8:]
          if not line:
            continue
          replace_literals.append((line, replacement))
    return {'literals': replace_literals, 'regexes': replace_regexes}

  @staticmethod
  def default_options():
    return FilteringOptions.parse_args([], error_on_empty=False)

  @staticmethod
  def parse_args(input_args, error_on_empty=True):
    parser = FilteringOptions.create_arg_parser()
    if not input_args and error_on_empty:
      parser.print_usage()
      raise SystemExit("No arguments specified.")
    args = parser.parse_args(input_args)
    if args.help:
      parser.print_help()
      raise SystemExit()
    FilteringOptions.sanity_check_args(args)
    if args.mailmap:
      args.mailmap = MailmapInfo(args.mailmap)
if args.replace_text:
      args.replace_text = FilteringOptions.get_replace_text(args.replace_text)
    return args

class RepoAnalyze(object):

  # First, several helper functions for analyze_commit()

  @staticmethod
  def equiv_class(stats, filename):
    return stats['equivalence'].get(filename, (filename,))

  @staticmethod
  def setup_equivalence_for_rename(stats, oldname, newname):
    # if A is renamed to B and B is renamed to C, then the user thinks of
    # A, B, and C as all being different names for the same 'file'. We record
    # this as an equivalence class:
    #   stats['equivalence'][name] = (A,B,C)
    # for name being each of A, B, and C.
    old_tuple = stats['equivalence'].get(oldname, ())
    if newname in old_tuple:
      return
    elif old_tuple:
      new_tuple = tuple(list(old_tuple)+[newname])
    else:
      new_tuple = (oldname, newname)
    for f in new_tuple:
      stats['equivalence'][f] = new_tuple

  @staticmethod
  def setup_or_update_rename_history(stats, commit, oldname, newname):
    rename_commits = stats['rename_history'].get(oldname, set())
    rename_commits.add(commit)
    stats['rename_history'][oldname] = rename_commits

  @staticmethod
  def handle_renames(stats, commit, change_types, filenames):
    for index, change_type in enumerate(change_types):
      if change_type == 'R':
        oldname, newname = filenames[index], filenames[-1]
        RepoAnalyze.setup_equivalence_for_rename(stats, oldname, newname)
        RepoAnalyze.setup_or_update_rename_history(stats, commit,
                                                   oldname, newname)

  @staticmethod
  def handle_file(stats, graph, commit, modes, shas, filenames):
    mode, sha, filename = modes[-1], shas[-1], filenames[-1]

    # Figure out kind of deletions to undo for this file, and update lists
    # of all-names-by-sha and all-filenames
    delmode = 'tree_deletions'
    if mode != '040000':
      delmode = 'file_deletions'
    stats['names'][sha].add(filename)
    stats['allnames'].add(filename)

    # If the file (or equivalence class of files) was recorded as deleted,
    # clearly it isn't anymore
    equiv = RepoAnalyze.equiv_class(stats, filename)
    for f in equiv:
      stats[delmode].pop(f, None)

    # If we get a modify/add for a path that was renamed, we may need to break
    # the equivalence class. However, if the modify/add was on a branch that
    # doesn't have the rename in its history, we are still okay.
    need_to_break_equivalence = False
    if equiv[-1] != filename:
      for rename_commit in stats['rename_history'][filename]:
        if graph.is_ancestor(rename_commit, commit):
          need_to_break_equivalence = True

if need_to_break_equivalence:
      for f in equiv:
        if f in stats['equivalence']:
          del stats['equivalence'][f]

  @staticmethod
  def analyze_commit(stats, graph, commit, parents, date, file_changes):
    graph.add_commit_and_parents(commit, parents)
    for change in file_changes:
      modes, shas, change_types, filenames = change
      if len(parents) == 1 and change_types.startswith('R'):
        change_types = 'R'  # remove the rename score; we don't care
      if modes[-1] == '160000':
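        # Mode 160000 is a gitlink (submodule commit); there's no blob
        # to track for it.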
        continue
      elif modes[-1] == '000000':
        # Track when files/directories are deleted
        for f in RepoAnalyze.equiv_class(stats, filenames[-1]):
          if any(x == '040000' for x in modes[0:-1]):
            stats['tree_deletions'][f] = date
          else:
            stats['file_deletions'][f] = date
      elif change_types.strip('AMT') == '':
        RepoAnalyze.handle_file(stats, graph, commit, modes, shas, filenames)
      elif modes[-1] == '040000' and change_types.strip('RAM') == '':
        RepoAnalyze.handle_file(stats, graph, commit, modes, shas, filenames)
      elif change_types.strip('RAM') == '':
        RepoAnalyze.handle_file(stats, graph, commit, modes, shas, filenames)
        RepoAnalyze.handle_renames(stats, commit, change_types, filenames)
      else:
        raise SystemExit("Unhandled change type(s): {} (in commit {})"
                         .format(change_types, commit))

  @staticmethod
  def gather_data(args):
    blob_size_progress = ProgressWriter()
    num_blobs = 0

    # Get sizes of blobs by sha1
    cmd = '--batch-check=%(objectname) %(objecttype) ' + \
          '%(objectsize) %(objectsize:disk)'
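    # With this format, cat-file emits one line per object:
    #   <sha> <type> <unpacked-size> <size-on-disk>
    # and we only record the sizes for blobs below.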
    cf = subprocess.Popen(['git', 'cat-file', '--batch-all-objects', cmd],
                          bufsize = -1,
                          stdout = subprocess.PIPE)
    unpacked_size = {}
    packed_size = {}
    for line in cf.stdout:
      sha, objtype, objsize, objdisksize = line.split()
      objsize, objdisksize = int(objsize), int(objdisksize)
      if objtype == 'blob':
        unpacked_size[sha] = objsize
        packed_size[sha] = objdisksize
      num_blobs += 1
      blob_size_progress.show("Processed {} blob sizes".format(num_blobs))
    cf.wait()
    blob_size_progress.finish()
    stats = {'names': collections.defaultdict(set),
             'allnames' : set(),
             'file_deletions': {},
             'tree_deletions': {},
             'equivalence': {},
             'rename_history': collections.defaultdict(set),
             'unpacked_size': unpacked_size,
             'packed_size': packed_size,
             'num_commits': 0}

    # Setup the rev-list/diff-tree process
    commit_parse_progress = ProgressWriter()
    num_commits = 0
    cmd = ('git rev-list --topo-order --reverse {}'.format(' '.join(args.refs)) +
           ' | git diff-tree --stdin --always --root --format=%H%n%P%n%cd' +
           ' --date=short -M -t -c --raw --combined-all-paths')
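    # Each changed path comes out of diff-tree --raw looking roughly like
    #   :<old-mode> <new-mode> <old-sha> <new-sha> <status>\t<path>
    # for single-parent commits; merges get one colon per parent, one extra
    # mode/sha column, and (with --combined-all-paths) each parent's
    # pathname tab-separated before the final one when renames or copies
    # are involved.  The parsing below relies on that layout.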
    dtp = subprocess.Popen(cmd, shell=True, bufsize=-1, stdout=subprocess.PIPE)
    f = dtp.stdout
    line = f.next()
    cont = bool(line)
    graph = AncestryGraph()
    while cont:
      commit = line.rstrip()
      parents = f.next().split()
      date = f.next().rstrip()

      # We expect a blank line next; if we get a non-blank line then
      # this commit modified no files and we need to move on to the next.
      # If there is no line, we've reached end-of-input.
      try:
        line = f.next().rstrip()
        cont = True
      except StopIteration:
        cont = False

      # If we haven't reached end of input, and we got a blank line meaning
      # a commit that has modified files, then get the file changes associated
      # with this commit.
      file_changes = []
      if cont and not line:
        cont = False
        for line in f:
          if not line.startswith(':'):
            cont = True
            break
          n = 1+max(1, len(parents))
          assert line.startswith(':'*(n-1))
          relevant = line[n-1:-1]
          splits = relevant.split(None, n)
          modes = splits[0:n]
          splits = splits[n].split(None, n)
          shas = splits[0:n]
          splits = splits[n].split('\t')
          change_types = splits[0]
          filenames = [PathQuoting.dequote(x) for x in splits[1:]]
          file_changes.append([modes, shas, change_types, filenames])

      # Analyze this commit and update progress
      RepoAnalyze.analyze_commit(stats, graph, commit, parents, date,
                                 file_changes)
      num_commits += 1
      commit_parse_progress.show("Processed {} commits".format(num_commits))

    # Show the final commits processed message and record the number of commits
    commit_parse_progress.finish()
    stats['num_commits'] = num_commits

    # Close the output, ensure rev-list|diff-tree pipeline completed successfully
    dtp.stdout.close()
    if dtp.wait():
      raise SystemExit("Error: rev-list|diff-tree pipeline failed; see above.")

    return stats

  @staticmethod
  def write_report(reportdir, stats):
    def datestr(datetimestr):
      return datetimestr if datetimestr else '<present>'

    def dirnames(path):
      while True:
        path = os.path.dirname(path)
        yield path
        if path == '':
          break
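    # e.g. dirnames('a/b/c') yields 'a/b', then 'a', then '' (the toplevel)
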
    # Compute aggregate size information for paths, extensions, and dirs
    total_size = {'packed': 0, 'unpacked': 0}
    path_size = {'packed': collections.defaultdict(int),
                 'unpacked': collections.defaultdict(int)}
    ext_size = {'packed': collections.defaultdict(int),
                'unpacked': collections.defaultdict(int)}
    dir_size = {'packed': collections.defaultdict(int),
                'unpacked': collections.defaultdict(int)}
    for sha in stats['names']:
      size = {'packed': stats['packed_size'][sha],
              'unpacked': stats['unpacked_size'][sha]}
      for which in ('packed', 'unpacked'):
        for name in stats['names'][sha]:
          total_size[which] += size[which]
          path_size[which][name] += size[which]
          basename, ext = os.path.splitext(name)
          ext_size[which][ext] += size[which]
          for dirname in dirnames(name):
            dir_size[which][dirname] += size[which]

    # Determine if and when extensions and directories were deleted
    ext_deleted_data = {}
    for name in stats['allnames']:
      when = stats['file_deletions'].get(name, None)

      # Update the extension
      basename, ext = os.path.splitext(name)
      if when is None:
        ext_deleted_data[ext] = None
      elif ext in ext_deleted_data:
        if ext_deleted_data[ext] is not None:
          ext_deleted_data[ext] = max(ext_deleted_data[ext], when)
      else:
        ext_deleted_data[ext] = when

    dir_deleted_data = {}
    for name in dir_size['packed']:
      dir_deleted_data[name] = stats['tree_deletions'].get(name, None)

    with open(os.path.join(reportdir, "README"), 'w') as f:
      # Give a basic overview of this file
f.write("== Overal Statistics ==\n")
|
|
|
|
f.write(" Number of commits: {}\n".format(stats['num_commits']))
|
|
|
|
f.write(" Number of filenames: {}\n".format(len(path_size['packed'])))
|
|
|
|
f.write(" Number of directories: {}\n".format(len(dir_size['packed'])))
|
|
|
|
f.write(" Number of file extensions: {}\n".format(len(ext_size['packed'])))
|
|
|
|
f.write("\n")
|
|
|
|
f.write(" Total unpacked size (bytes): {:10d}\n"
|
|
|
|
.format(total_size['unpacked']))
|
|
|
|
f.write(" Total packed size (bytes): {:10d}\n"
|
|
|
|
.format(total_size['packed']))
|
|
|
|
f.write("\n")
|
|
|
|
|
|
|
|
# Mention issues with the report
|
|
|
|
f.write("== Caveats ==\n")
|
|
|
|
f.write("=== Sizes ===\n")
|
|
|
|
f.write(textwrap.dedent("""
|
|
|
|
Packed size represents what size your repository would be if no
|
|
|
|
trees, commits, tags, or other metadata were included (though it may
|
|
|
|
fail to represent de-duplication; see below). It also represents the
|
|
|
|
current packing, which may be suboptimal if you haven't gc'ed for a
|
|
|
|
while.
|
|
|
|
|
|
|
|
        Unpacked size represents what size your repository would be if no
        trees, commits, tags, or other metadata were included AND if no
        files were packed; i.e., without delta-ing or compression.

        Both unpacked and packed sizes can be slightly misleading.  Deleting
        a blob from history may not save as much space as the unpacked size,
        because it is obviously normally stored in packed form.  Also,
        deleting a blob from history may not save as much space as its packed
        size either, because another blob could be stored as a delta against
        that blob, so when you remove one blob another blob's packed size may
        grow.

        Also, the sum of the packed sizes can add up to more than the
        repository size; if the same contents appeared in the repository in
        multiple places, git will automatically de-dupe and store only one
        copy, while the way sizes are added in this analysis adds the size
        for each file path that has those contents.  Further, if a file is
        ever reverted to a previous version's contents, the previous
        version's size will be counted multiple times in this analysis, even
        though git will only store it once.
        """[1:]))
      f.write("\n")
f.write("=== Deletions ===\n")
|
|
|
|
f.write(textwrap.dedent("""
|
|
|
|
Whether a file is deleted is not a binary quality, since it can be
|
|
|
|
deleted on some branches but still exist in others. Also, it might
|
|
|
|
exist in an old tag, but have been deleted in versions newer than
|
|
|
|
that. More thorough tracking could be done, including looking at
|
|
|
|
merge commits where one side of history deleted and the other modified,
|
|
|
|
in order to give a more holistic picture of deletions. However, that
|
|
|
|
algorithm would not only be more complex to implement, it'd also be
|
|
|
|
quite difficult to present and interpret by users. Since --analyze
|
|
|
|
is just about getting a high-level rough picture of history, it instead
|
|
|
|
implements the simplistic rule that is good enough for 98% of cases:
|
|
|
|
A file is marked as deleted if the last commit in the fast-export
|
|
|
|
stream that mentions the file lists it as deleted.
|
|
|
|
This makes it dependent on topological ordering, but generally gives
|
|
|
|
the "right" answer.
|
|
|
|
"""[1:]))
|
|
|
|
f.write("\n")
|
|
|
|
f.write("=== Renames ===\n")
|
|
|
|
f.write(textwrap.dedent("""
|
|
|
|
Renames share the same non-binary nature that deletions do, plus
|
|
|
|
additional challenges:
|
|
|
|
* If the renamed file is renamed again, instead of just two names for
|
|
|
|
a path you can have three or more.
|
|
|
|
* Rename pairs of the form (oldname, newname) that we consider to be
|
|
|
|
different names of the "same file" might only be valid over certain
|
|
|
|
commit ranges. For example, if a new commit reintroduces a file
|
|
|
|
named oldname, then new versions of oldname aren't the "same file"
|
|
|
|
anymore. We could try to portray this to the user, but it's easier
|
|
|
|
for the user to just break the pairing and only report unbroken
|
|
|
|
rename pairings to the user.
|
|
|
|
* The ability for users to rename files differently in different
|
|
|
|
branches means that our chains of renames will not necessarily be
|
|
|
|
linear but may branch out.
|
|
|
|
"""[1:]))
|
|
|
|
f.write("\n")
|
|
|
|
|
|
|
|
    # Equivalence classes for names, so if folks only want to keep a
    # certain set of paths, they know the old names they want to include
    # too.
    with open(os.path.join(reportdir, "renames.txt"), 'w') as f:
      seen = set()
      for pathname,equiv_group in sorted(stats['equivalence'].iteritems(),
                                         key=lambda x:x[1]):
        if equiv_group in seen:
          continue
        seen.add(equiv_group)
        f.write("{} ->\n    ".format(equiv_group[0]) +
                "\n    ".join(equiv_group[1:]) +
                "\n")

    # List directories in reverse sorted order of unpacked size
    with open(os.path.join(reportdir, "directories-deleted-sizes.txt"), 'w') as f:
      f.write("=== Deleted directories by reverse size ===\n")
      f.write("Format: unpacked size, packed size, date deleted, directory name\n")
      for dirname, size in sorted(dir_size['packed'].iteritems(),
                                  key=lambda x:x[1], reverse=True):
        if (dir_deleted_data[dirname]):
          f.write("  {:10d} {:10d} {:10s} {}\n"
                  .format(dir_size['unpacked'][dirname],
                          size,
                          datestr(dir_deleted_data[dirname]),
                          dirname or '<toplevel>'))

    with open(os.path.join(reportdir, "directories-all-sizes.txt"), 'w') as f:
      f.write("=== All directories by reverse size ===\n")
      f.write("Format: unpacked size, packed size, date deleted, directory name\n")
      for dirname, size in sorted(dir_size['packed'].iteritems(),
                                  key=lambda x:x[1], reverse=True):
        f.write("  {:10d} {:10d} {:10s} {}\n"
                .format(dir_size['unpacked'][dirname],
                        size,
                        datestr(dir_deleted_data[dirname]),
                        dirname or '<toplevel>'))

    # List extensions in reverse sorted order of unpacked size
    with open(os.path.join(reportdir, "extensions-deleted-sizes.txt"), 'w') as f:
      f.write("=== Deleted extensions by reverse size ===\n")
      f.write("Format: unpacked size, packed size, date deleted, extension name\n")
      for extname, size in sorted(ext_size['packed'].iteritems(),
                                  key=lambda x:x[1], reverse=True):
        if (ext_deleted_data[extname]):
          f.write("  {:10d} {:10d} {:10s} {}\n"
                  .format(ext_size['unpacked'][extname],
                          size,
                          datestr(ext_deleted_data[extname]),
                          extname or '<no extension>'))

    with open(os.path.join(reportdir, "extensions-all-sizes.txt"), 'w') as f:
      f.write("=== All extensions by reverse size ===\n")
      f.write("Format: unpacked size, packed size, date deleted, extension name\n")
      for extname, size in sorted(ext_size['packed'].iteritems(),
                                  key=lambda x:x[1], reverse=True):
        f.write("  {:10d} {:10d} {:10s} {}\n"
                .format(ext_size['unpacked'][extname],
                        size,
                        datestr(ext_deleted_data[extname]),
                        extname or '<no extension>'))

    # List files in reverse sorted order of unpacked size
    with open(os.path.join(reportdir, "path-deleted-sizes.txt"), 'w') as f:
      f.write("=== Deleted paths by reverse accumulated size ===\n")
      f.write("Format: unpacked size, packed size, date deleted, path name(s)\n")
      for pathname, size in sorted(path_size['packed'].iteritems(),
                                   key=lambda x:x[1], reverse=True):
        when = stats['file_deletions'].get(pathname, None)
        if when:
          f.write("  {:10d} {:10d} {:10s} {}\n"
                  .format(path_size['unpacked'][pathname],
                          size,
                          datestr(when),
                          pathname))

    with open(os.path.join(reportdir, "path-all-sizes.txt"), 'w') as f:
      f.write("=== All paths by reverse accumulated size ===\n")
f.write("Format: unpacked size, packed size, date deleted, pathectory name\n")
|
|
|
|
      for pathname, size in sorted(path_size['packed'].iteritems(),
                                   key=lambda x:x[1], reverse=True):
        when = stats['file_deletions'].get(pathname, None)
        f.write("  {:10d} {:10d} {:10s} {}\n"
                .format(path_size['unpacked'][pathname],
                        size,
                        datestr(when),
                        pathname))

    # List of filenames and sizes in descending order
    with open(os.path.join(reportdir, "blob-shas-and-paths.txt"), 'w') as f:
      f.write("== Files by sha and associated pathnames in reverse size ==\n")
      f.write("Format: sha, unpacked size, packed size, filename(s) object stored as\n")
      for sha, size in sorted(stats['packed_size'].iteritems(),
                              key=lambda x:x[1], reverse=True):
        if sha not in stats['names']:
          # Some objects in the repository might not be referenced, or not
          # referenced by the branches/tags the user cares about; skip them.
          continue
        names_with_sha = stats['names'][sha]
        if len(names_with_sha) == 1:
          names_with_sha = names_with_sha.pop()
        else:
          names_with_sha = sorted(list(names_with_sha))
        f.write("  {} {:10d} {:10d} {}\n".format(sha,
                                                 stats['unpacked_size'][sha],
                                                 size,
                                                 names_with_sha))

  @staticmethod
  def run(args):
    git_dir = GitUtils.determine_git_dir('.')

    # Create the report directory as necessary
    results_tmp_dir = os.path.join(git_dir, 'filter-repo')
    if not os.path.isdir(results_tmp_dir):
      os.mkdir(results_tmp_dir)
    reportdir = os.path.join(results_tmp_dir, "analysis")
    if not args.force and os.path.isdir(reportdir):
      raise SystemExit("Error: {} already exists; refusing to overwrite!"
                       .format(reportdir))
    os.mkdir(reportdir)

    # Gather the data we need
    stats = RepoAnalyze.gather_data(args)

    # Write the reports
    sys.stdout.write("Writing reports to {}...".format(reportdir))
    sys.stdout.flush()
    RepoAnalyze.write_report(reportdir, stats)
    sys.stdout.write("done.\n")


class InputFileBackup:
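  """Wrapper for an input file that tees everything read to an output file."""
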
  def __init__(self, input_file, output_file):
    self.input_file = input_file
    self.output_file = output_file

  def read(self, size):
    output = self.input_file.read(size)
    self.output_file.write(output)
    return output

  def readline(self):
    line = self.input_file.readline()
    self.output_file.write(line)
    return line


class DualFileWriter:
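  """File-like object that duplicates every write to two underlying files."""
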
  def __init__(self, file1, file2):
    self.file1 = file1
    self.file2 = file2

  def write(self, *args):
    self.file1.write(*args)
    self.file2.write(*args)

  def close(self):
    self.file1.close()
    self.file2.close()


class RepoFilter(object):
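  """Drives a single history rewrite.

  Launches fast-export and fast-import, applies the built-in tweaks
  (mailmap translation, path filtering/renaming, tag renaming, text
  replacement) plus any user-supplied callbacks to each exported object,
  then cleans up refs, reflogs, and packs afterward.
  """
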
  def __init__(self,
               args,
               blob_callback = None,
               commit_callback = None,
               tag_callback = None,
               reset_callback = None,
               everything_callback = None):
    # Store arguments for later use
    self._args = args
    self._blob_callback = blob_callback
    self._commit_callback = commit_callback
    self._tag_callback = tag_callback
    self._reset_callback = reset_callback
    self._everything_callback = everything_callback

    # Defaults for input
    self._input = None
    self._fep = None  # Fast Export Process
    self._fe_orig = None  # Path to where original fast-export output stored
    self._fe_filt = None  # Path to where filtered fast-export output stored

    # Defaults for output
    self._output = None
    self._fip = None  # Fast Import Process
    self._import_pipes = None
    self._managed_output = True

    # Other vars
    self._sanity_checks_handled = False
    self._orig_refs = None
    self._newnames = {}

  def _run_sanity_checks(self):
    self._sanity_checks_handled = True
    if not self._managed_output:
      return

    if self._args.debug:
      print("[DEBUG] Passed arguments:\n{}".format(self._args))

    # Determine basic repository information
    target_working_dir = self._args.target or '.'
    self._orig_refs = GitUtils.get_refs(target_working_dir)
    is_bare = GitUtils.is_repository_bare(target_working_dir)

    # Do sanity checks from the correct directory
    if not self._args.force:
      cwd = os.getcwd()
      os.chdir(target_working_dir)
      RepoFilter.sanity_check(self._orig_refs, is_bare)
      os.chdir(cwd)

  @staticmethod
  def sanity_check(refs, is_bare):
    def abort(reason):
      raise SystemExit(
        "Aborting: Refusing to overwrite repo history since this does not\n"
        "look like a fresh clone.\n"
        "  ("+reason+")\n"
        "To override, use --force.")

    # Make sure repo is fully packed, just like a fresh clone would be
    output = subprocess.check_output('git count-objects -v'.split())
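    # count-objects -v emits 'key: value' lines; 'count' is the number of
    # loose objects and 'packs' the number of packfiles.  A fresh clone has
    # no loose objects and at most one pack.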
    stats = dict(x.split(': ') for x in output.splitlines())
    num_packs = int(stats['packs'])
    if stats['count'] != '0' or num_packs > 1:
      abort("expected freshly packed repo")

    # Make sure there is precisely one remote, named "origin"...or that this
    # is a new bare repo with no packs and no remotes
    output = subprocess.check_output('git remote'.split()).strip()
    if not (output == "origin" or (num_packs == 0 and not output)):
      abort("expected one remote, origin")

    # Avoid letting people running with weird setups and overwriting GIT_DIR
    # elsewhere
    git_dir = GitUtils.determine_git_dir('.')
    if is_bare and git_dir != '.':
      abort("GIT_DIR must be .")
    elif not is_bare and git_dir != '.git':
      abort("GIT_DIR must be .git")

    # Make sure that all reflogs have precisely one entry
    reflog_dir = os.path.join(git_dir, 'logs')
    for root, dirs, files in os.walk(reflog_dir):
      for filename in files:
        pathname = os.path.join(root, filename)
        with open(pathname) as f:
          if len(f.read().splitlines()) > 1:
            shortpath = pathname[len(reflog_dir)+1:]
            abort("expected at most one entry in the reflog for " + shortpath)

    # Make sure there are no stashed changes
    if 'refs/stash' in refs:
      abort("has stashed changes")

    # Do extra checks in non-bare repos
    if not is_bare:
      # Avoid uncommitted, unstaged, or untracked changes
      if subprocess.call('git diff --staged --quiet'.split()):
abort("you have uncommitted changes")
|
|
|
|
if subprocess.call('git diff --quiet'.split()):
|
|
|
|
abort("you have unstaged changes")
|
|
|
|
if len(subprocess.check_output('git ls-files -o'.split())) > 0:
|
|
|
|
abort("you have untracked changes")
|
|
|
|
|
|
|
|
# Avoid unpushed changes
|
|
|
|
for refname, rev in refs.iteritems():
|
|
|
|
if not refname.startswith('refs/heads/'):
|
|
|
|
continue
|
|
|
|
origin_ref = refname.replace('refs/heads/', 'refs/remotes/origin/')
|
|
|
|
if origin_ref not in refs:
|
|
|
|
abort('{} exists, but {} not found'.format(refname, origin_ref))
|
|
|
|
if rev != refs[origin_ref]:
|
|
|
|
abort('{} does not match {}'.format(refname, origin_ref))
|
|
|
|
|
  @staticmethod
  def tweak_blob(args, blob):
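    # args.replace_text, as built by FilteringOptions.get_replace_text(),
    # is a dict with 'literals' (a list of (literal, replacement) pairs)
    # and 'regexes' (a list of (compiled_regex, replacement) pairs).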
    if args.replace_text:
      for literal, replacement in args.replace_text['literals']:
        blob.data = blob.data.replace(literal, replacement)
      for regex, replacement in args.replace_text['regexes']:
        blob.data = regex.sub(replacement, blob.data)

  def tweak_commit(self, args, commit):
    def filename_matches(path_expression, pathname):
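      # A path expression matches the path itself or anything beneath it;
      # e.g. 'src' matches both 'src' and 'src/main.c', but not 'srcfoo'
      # (and '' matches everything).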
      if path_expression == '':
        return True
      n = len(path_expression)
      if (pathname.startswith(path_expression) and
          (path_expression[n-1] == '/' or
           len(pathname) == n or
           pathname[n] == '/')):
        return True
      return False

    def newname(path_changes, pathname, filtering_is_inclusive):
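      # Each entry of path_changes is a (mod_type, match_type,
      # path_expression) triple: 'filter' entries decide whether a path is
      # wanted (by directory/file match, glob, or regex), while 'rename'
      # entries rewrite a matching prefix via an 'old:new' expression.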
      wanted = False
      for (mod_type, match_type, path_exp) in path_changes:
        if mod_type == 'filter' and not wanted:
          assert match_type in ('match', 'glob', 'regex')
          if match_type == 'match' and filename_matches(path_exp, pathname):
            wanted = True
          if match_type == 'glob' and fnmatch.fnmatch(pathname, path_exp):
            wanted = True
          if match_type == 'regex' and path_exp.search(pathname):
            wanted = True
        elif mod_type == 'rename':
          old_exp, new_exp = path_exp.split(':')
          assert match_type in ('prefix',)
          if match_type == 'prefix' and pathname.startswith(old_exp):
            pathname = pathname.replace(old_exp, new_exp, 1)
      return pathname if (wanted == filtering_is_inclusive) else None

    # Change the author & committer according to mailmap rules
    if args.mailmap:
      commit.author_name, commit.author_email = \
        args.mailmap.translate(commit.author_name, commit.author_email)
      commit.committer_name, commit.committer_email = \
        args.mailmap.translate(commit.committer_name, commit.committer_email)

    # Sometimes the 'branch' given is a tag; if so, rename it as requested so
    # we don't get any old tagnames
    commit.branch = RepoFilter.new_tagname(args, commit.branch)

    # Filter the list of file changes
    new_file_changes = {}
    for change in commit.file_changes:
      if change.filename in self._newnames:
        change.filename = self._newnames[change.filename]
      else:
        original_filename = change.filename
        change.filename = newname(args.path_changes, change.filename,
                                  args.inclusive)
        # Cache keyed by the original name so later occurrences of the same
        # path reuse the result (keying by the new name would never match).
        self._newnames[original_filename] = change.filename
      if not change.filename:
        continue  # Filtering criteria excluded this file; move on to next one
      if change.filename in new_file_changes:
        # Getting here means that path renaming is in effect, and caused one
        # path to collide with another.  That's usually bad, but sometimes
        # people have a file named OLDFILE in old revisions of history, and
        # they rename to NEWFILE, and would like to rewrite history so that
        # all revisions refer to it as NEWFILE.  As such, we can allow a
        # collision when (at least) one of the two paths is a deletion.  Note
        # that if OLDFILE and NEWFILE are unrelated this also allows the
        # rewrite to continue, which makes sense since OLDFILE is no longer
        # in the way.
        if change.type == 'D':
          # We can just throw this one away and keep the other
          continue
        elif new_file_changes[change.filename].type != 'D':
          raise SystemExit("File renaming caused colliding pathnames!\n" +
                           "  Commit: {}\n".format(commit.original_id) +
                           "  Filename: {}".format(change.filename))
      new_file_changes[change.filename] = change
    commit.file_changes = new_file_changes.values()

  @staticmethod
  def new_tagname(args, tagname, shortname = False):
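    # args.tag_rename is an 'old:new' prefix pair; e.g. with 'foo-:bar-',
    # refs/tags/foo-1.0 becomes refs/tags/bar-1.0.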
    replace = args.tag_rename
    if not replace:
      return tagname
    old, new = replace.split(':', 1)
    if not shortname:
      old, new = 'refs/tags/'+old, 'refs/tags/'+new
    if tagname.startswith(old):
      return tagname.replace(old, new, 1)
    return tagname

  @staticmethod
  def handle_tag(args, tag, shortname = False):
    tag.ref = RepoFilter.new_tagname(args, tag.ref, shortname)
    if args.mailmap:
      tag.tagger_name, tag.tagger_email = \
        args.mailmap.translate(tag.tagger_name, tag.tagger_email)

  @staticmethod
  def handle_reset(args, reset, shortname = False):
    reset.ref = RepoFilter.new_tagname(args, reset.ref, shortname)

  def results_tmp_dir(self):
    working_dir = self._args.target or self._args.source or '.'
    git_dir = GitUtils.determine_git_dir(working_dir)
    d = os.path.join(git_dir, 'filter-repo')
    if not os.path.isdir(d):
      os.mkdir(d)
    return d

  def importer_only(self):
    self._run_sanity_checks()
    self._setup_output()

  def set_output(self, outputRepoFilter):
    assert outputRepoFilter._output

    # set_output implies this RepoFilter is doing exporting, though it may
    # not be the only one.
    self._setup_input(use_done_feature = False)

    # Set our output management up to pipe to outputRepoFilter's locations
    self._managed_output = False
    self._output = outputRepoFilter._output
    self._import_pipes = outputRepoFilter._import_pipes

    # Handle sanity checks, though currently none needed for export-only cases
    self._run_sanity_checks()

  def _setup_input(self, use_done_feature):
    if self._args.stdin:
      self._input = sys.stdin
      self._fe_orig = None
    else:
      skip_blobs = (self._blob_callback is None and
                    self._everything_callback is None and
                    self._args.replace_text is None and
                    self._args.source is None and
                    self._args.target is None)
      extra_flags = ['--no-data'] if skip_blobs else []
      done_feature = ['--use-done-feature'] if use_done_feature else []
      location = ['-C', self._args.source] if self._args.source else []
      fep_cmd = ['git'] + location + ['fast-export', '--show-original-ids',
                 '--signed-tags=strip', '--tag-of-filtered-object=rewrite'
                 ] + done_feature + extra_flags + self._args.refs
      self._fep = subprocess.Popen(fep_cmd, bufsize=-1, stdout=subprocess.PIPE)
      self._input = self._fep.stdout
      if self._args.dry_run or self._args.debug:
        self._fe_orig = os.path.join(self.results_tmp_dir(),
                                     'fast-export.original')
        output = open(self._fe_orig, 'w')
        self._input = InputFileBackup(self._input, output)
        if self._args.debug:
          print("[DEBUG] Running: {}".format(' '.join(fep_cmd)))
          print("  (saving a copy of the output at {})".format(self._fe_orig))

  def _setup_output(self):
    if not self._args.dry_run:
      location = ['-C', self._args.target] if self._args.target else []
      fip_cmd = ['git'] + location + 'fast-import --force --quiet'.split()
      self._fip = subprocess.Popen(fip_cmd,
                                   bufsize=-1,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE)
      self._import_pipes = (self._fip.stdin, self._fip.stdout)
    if self._args.dry_run or self._args.debug:
      self._fe_filt = os.path.join(self.results_tmp_dir(),
                                   'fast-export.filtered')
      self._output = open(self._fe_filt, 'w')
    else:
      self._output = self._fip.stdin
    # Guard against dry runs: self._fip and fip_cmd only exist when
    # fast-import was actually launched above.
    if self._args.debug and not self._args.dry_run:
      self._output = DualFileWriter(self._fip.stdin, self._output)
      print("[DEBUG] Running: {}".format(' '.join(fip_cmd)))
      print("  (using the following file as input: {})".format(self._fe_filt))

  def _migrate_origin_to_heads(self):
    if self._args.dry_run:
      return
    refs_to_migrate = set(x for x in self._orig_refs
                          if x.startswith('refs/remotes/origin/'))
    if not refs_to_migrate:
      return
    if self._args.debug:
      print("[DEBUG] Migrating refs/remotes/origin/* -> refs/heads/*")
    target_working_dir = self._args.target or '.'
    p = subprocess.Popen('git update-ref --no-deref --stdin'.split(),
                         stdin=subprocess.PIPE,
                         cwd=target_working_dir)
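    # 'git update-ref --stdin' accepts one transaction command per line;
    # we use 'create <ref> <newvalue>' and 'delete <ref> <oldvalue>', where
    # the <oldvalue> on delete guards against the ref having changed.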
    for ref in refs_to_migrate:
      if ref == 'refs/remotes/origin/HEAD':
        p.stdin.write('delete {} {}\n'.format(ref, self._orig_refs[ref]))
        del self._orig_refs[ref]
        continue
      newref = ref.replace('refs/remotes/origin/', 'refs/heads/')
      if newref not in self._orig_refs:
        p.stdin.write('create {} {}\n'.format(newref, self._orig_refs[ref]))
      p.stdin.write('delete {} {}\n'.format(ref, self._orig_refs[ref]))
      self._orig_refs[newref] = self._orig_refs[ref]
      del self._orig_refs[ref]
    p.stdin.close()
    if p.wait():
      raise SystemExit("git update-ref failed; see above")

    # Now remove the 'origin' remote
    if self._args.debug:
      print("[DEBUG] Removing 'origin' remote (rewritten history will no")
      print("        longer be related; consider re-pushing it elsewhere.)")
    subprocess.call('git remote rm origin'.split(), cwd=target_working_dir)

  def finish(self):
    ''' Alternative to run() when there is no input of our own to parse,
        meaning that run only really needs to close the handle to fast-import
        and let it finish, thus making a call to "run" feel like a misnomer. '''
    assert not self._input
    assert self._managed_output
    self.run()

  def run(self):
    start = time.time()
    if not self._input and not self._output:
      self._run_sanity_checks()
      self._migrate_origin_to_heads()
      self._setup_input(use_done_feature = True)
      self._setup_output()
    assert self._sanity_checks_handled

    if self._input:
      # Set up the callbacks
      def combined_blob_callback(b):
        RepoFilter.tweak_blob(self._args, b)
        self._blob_callback and self._blob_callback(b)
      def actual_commit_callback(c):
        self.tweak_commit(self._args, c)
        self._commit_callback and self._commit_callback(c)
      def actual_tag_callback(t):
        RepoFilter.handle_tag(self._args, t, shortname = True)
        self._tag_callback and self._tag_callback(t)
      def actual_reset_callback(r):
        RepoFilter.handle_reset(self._args, r)
        self._reset_callback and self._reset_callback(r)

      actual_blob_callback = self._blob_callback
      if self._args.replace_text:
        actual_blob_callback = combined_blob_callback

      # Create and run the filter
      fef = FastExportFilter(self._args.source or '.',
                             blob_callback = actual_blob_callback,
                             commit_callback = actual_commit_callback,
                             tag_callback = actual_tag_callback,
                             reset_callback = actual_reset_callback,
                             everything_callback = self._everything_callback)
      fef.run(self._input,
              self._output,
              fast_import_pipes = self._import_pipes,
              quiet = self._args.quiet)

      # Make sure fast-export completed successfully
      if not self._args.stdin and self._fep.wait():
        raise SystemExit("Error: fast-export failed; see above.")

    # If we're not the manager of self._output, we should avoid post-run cleanup
    if not self._managed_output:
      return

    # Close the output and ensure fast-import successfully completes
    self._output.close()
    if not self._args.dry_run and self._fip.wait():
      raise SystemExit("Error: fast-import failed; see above.")

    # Notify user how long it took, before doing a gc and such
    msg = "New history written in {:.2f} seconds; now repacking/cleaning..."
    print(msg.format(time.time()-start))

    # Exit early, if requested
    if self._args.dry_run:
      if self._fe_orig:
        orig_str = "by comparing:\n  "+self._fe_orig
      else:
        orig_str = "at:"
      print("NOTE: Not running fast-import or cleaning up; --dry-run passed.")
      print("      Requested filtering can be seen {}".format(orig_str))
      print("        " + self._fe_filt)
      sys.exit(0)

    target_working_dir = self._args.target or '.'
    if self._input:
      # Remove unused refs
      refs_to_nuke = set(self._orig_refs) - set(fef.get_seen_refs())
      if refs_to_nuke:
        if self._args.debug:
          print("[DEBUG] Deleting the following refs:\n  "+
                "\n  ".join(refs_to_nuke))
        p = subprocess.Popen('git update-ref --stdin'.split(),
                             stdin=subprocess.PIPE,
                             cwd=target_working_dir)
        p.stdin.write(''.join(["option no-deref\ndelete {}\n".format(x)
                               for x in refs_to_nuke]))
        p.stdin.close()
        if p.wait():
          raise SystemExit("git update-ref failed; see above")

      # Write out data about run
      fef.record_metadata(self.results_tmp_dir(),
                          self._orig_refs,
                          refs_to_nuke)

    # Nuke the reflogs and repack
    if not self._args.quiet and not self._args.debug:
      print("Repacking your repo and cleaning out old unneeded objects")
    quiet_flags = '--quiet' if self._args.quiet else ''
    cleanup_cmds = ['git reflog expire --expire=now --all'.split(),
                    'git gc {} --prune=now'.format(quiet_flags).split()]
    if not GitUtils.is_repository_bare(target_working_dir):
      cleanup_cmds.append('git reset {} --hard'.format(quiet_flags).split())
    for cmd in cleanup_cmds:
      if self._args.debug:
        print("[DEBUG] Running: {}".format(' '.join(cmd)))
      subprocess.call(cmd, cwd=target_working_dir)

    # Let user know how long it took
    print("Completely finished after {:.2f} seconds.".format(time.time()-start))


if __name__ == '__main__':
  args = FilteringOptions.parse_args(sys.argv[1:])
  if args.analyze:
    RepoAnalyze.run(args)
  else:
    filter = RepoFilter(args)
    filter.run()