#
#
# patch "ChangeLog"
# from [81e3045140bcbf832f6fe43481c287b7343f50a8]
# to [effebb6d6ea8613ca6a0306794ceaf91df89128a]
#
# patch "annotate.cc"
# from [5f284a607f1ef3dd5a7e192a58c396ce135ae9ff]
# to [f77b679344341a4af199df9b59efd49b4676a6af]
#
# patch "app_state.cc"
# from [6a8be1d3051e8e84464730d9d4f60cc9abca098d]
# to [55a92b8e0df6ce29b219a3efef760983fa57c9ae]
#
# patch "automate.cc"
# from [8aa16555b3ed2bbd87bbdb395a2d23a790803f19]
# to [7d53f42255a88e41a2b1bd27c85aff62ecad823d]
#
# patch "basic_io.cc"
# from [5c8f164d4a3f4829497c834164cb09ac6341467c]
# to [0cd7c3eb2d37b603a49cab9959f582bcc4fb661f]
#
# patch "cert.cc"
# from [90434ca979245b8d440c3daead3d8ad5120e27ee]
# to [d568dcf9cbf42901fbc3aec941fb6111004a74e4]
#
# patch "charset.cc"
# from [e954d3206a9df54b32bea7aba29d723fbad74f14]
# to [c958f957b5deca18a5803c512ae5cf58d08b5fb1]
#
# patch "cmd_automate.cc"
# from [d973f6704cc5cf5a777b8e4f5bdd60ee10048b7c]
# to [9888db8b3c4b072092f933a65973fa42e94f7980]
#
# patch "cmd_db.cc"
# from [cef42bd1ec6ec18b7e37bfbd70d1d9decee2d679]
# to [7403b1a1bb54a12e01dee5fcbbc3db7aaf8ba557]
#
# patch "cmd_diff_log.cc"
# from [31fe8c5d41bd88bd58dae13ad2b21a8839b5a415]
# to [6b49be84ff5e47f22c1942927ea41ab7c24da386]
#
# patch "cmd_list.cc"
# from [5fcd71a963f52bf864f3a8f392c8d9ae328ac953]
# to [7647566d20b4a6c7c435b01f92ea90838c12646c]
#
# patch "cmd_merging.cc"
# from [8cb9a727c46da071534f510458de386f45b2c092]
# to [14dbf8776d68cf092e76c4d7479afaafd24e4705]
#
# patch "cmd_netsync.cc"
# from [431b5bb6daff4f8d8992eee7ff14a21512895b81]
# to [1085c92a4b4e60e11c02378427e0cd2c4e844acd]
#
# patch "cmd_packet.cc"
# from [26b3d187c3a139599b7c50ba80c29f81c382dfdd]
# to [0d127531d08a403249ba6fea64f09ee1fd0546e3]
#
# patch "cmd_ws_commit.cc"
# from [130ebf460f4770ca6b39dd7c5d2b8dbf39405656]
# to [159a7abbffa9e3992f5a3dba2af148de388e1f53]
#
# patch "commands.cc"
# from [a02d61685a3807368a2e1ec1fed79ea5f342ea79]
# to [9eb8b9fe197b66baa555579ff90b5e1ed9f9c8dd]
#
# patch "constants.cc"
# from [45406dd40ffed1993ee987120843b62ef9d3b8ec]
# to [b175c8ae9c808089d3e9f7216896be26714703db]
#
# patch "crypto_tests.cc"
# from [6e7f56e97edd513e405a24beb2df684d0bfab1b4]
# to [5f2ad6110bb7f9a10978ede2c860ad44b24ffed0]
#
# patch "cset.cc"
# from [17cc4461f4e98e06986d4543b0952be1272ef3e9]
# to [bf3abceb49091cfed0d14d02c1188d14378b8f6e]
#
# patch "database.cc"
# from [e185a4b7db7def9d9c5ddb0b1bc9f883d623220b]
# to [0d25c2fa019f20eb0b7299f2b569a4091571aed1]
#
# patch "database_check.cc"
# from [91784581512b6a22dba4959fd46076f8ac2d18b6]
# to [1354e4948560a3e9197df485a0d6d4208c7d442c]
#
# patch "diff_patch.cc"
# from [c538374230aa5c4eb9c455418cfa55e443af7244]
# to [830fa80815a651ce2a0d371aa7ee82ab78410845]
#
# patch "enumerator.cc"
# from [43edc729be97fabc35880f79fbea7640d109bea0]
# to [e2ebd144f9f66f1dab7a5324c62e2f7d73eb4646]
#
# patch "epoch.cc"
# from [54bd9b44a5daae23e9b188fec591acf6c79bcf90]
# to [b150f9d678d4fba5c2b35c4932a1a04a7c4564c0]
#
# patch "file_io.cc"
# from [7fc3a9cac7cb770a2a4fa826d3a23797cda1242e]
# to [758cd915fad8ada25a51dbce3d0565ed68de0895]
#
# patch "globish.cc"
# from [93e5d9dfe7630bf765164f90363c7d30cc9e4e01]
# to [409df5d1f8ff0ad46a144ed6f1344418185d54dc]
#
# patch "hmac.cc"
# from [01430adfa6339a539db4327741755a05191bab7b]
# to [3884bf3dd20a12ec2023bc4f5fb6b60eaca69fd6]
#
# patch "inodeprint.cc"
# from [49cfddcee6a39755d3d2f8d6c2bc7fb49c5a556d]
# to [62b091111b84a22f6de804b3c9d37716e7a06169]
#
# patch "key_store.cc"
# from [7a33f02a482db43ad119b210848f3fdb9a306ba0]
# to [dcdebf60e129b121b78cc0f2c53e3c7a19cf69a9]
#
# patch "keys.cc"
# from [ab0e49cae3664e3aa8f52c652e0825dea72de281]
# to [bfdcb803f58119ab0c399b553bd69e35fc6e47ce]
#
# patch "lcs.cc"
# from [fd29f8174a6718842724caece25b5ea753da1a04]
# to [6c1c76404a788927308bff3b8f52d52f8ae1c12a]
#
# patch "legacy.cc"
# from [56eba9490503059bb62f34ca4d04007d3b56bbd5]
# to [cb7eb7d67ccf5f3b3d762c3889a90f1e17509222]
#
# patch "lua.cc"
# from [8412d7a38d765381e449c100f085a2dbdca3ae63]
# to [06e4a9aeca73e87a2617ff8df2d674646bf2cb8e]
#
# patch "lua_hooks.cc"
# from [2c8a4626208b7b0128a96da8ea532f7d3fce0e85]
# to [348229d0b87e5aad59d452c8dea3f26ca9481031]
#
# patch "main.cc"
# from [0981328eff2cd408bb7d9b13208b16243feec637]
# to [1b50bf9eefcb967254582b38a5a886f8ad09eea6]
#
# patch "merge.cc"
# from [f129963451cc6c8f1562d84852c6d4e31a211eec]
# to [cc6860bdc2e1ba18713800ef24ceb08c605abab0]
#
# patch "merkle_tree.cc"
# from [0be64fcc156d10e75e0f235856e9c4c428f1915c]
# to [7011e2dbceb1d2e78db31c8c4570d443b383c639]
#
# patch "mkstemp.cc"
# from [a58750be73db167658f8b3df2993d0dec8648a5c]
# to [97f331512edf52eaa5940a6f891167e4f990c1fc]
#
# patch "monotone.cc"
# from [7ce7990a7e4ebb99f1980ccdeca7a6e1b297ea7b]
# to [e346183d0490498d8bc028de420c85e7ebc55cac]
#
# patch "mt_version.cc"
# from [45d1e20bb6dc2d490854f8647d9788bc3ae04654]
# to [244fa7c8fa1c1d2f65c9ee9c2542e95f8e284655]
#
# patch "netcmd.cc"
# from [da3773c93069c0834ce0b3caf34020305dd7d8b2]
# to [fd1bcbe65cd56e1a9379aa56c6039df9eac81b48]
#
# patch "netsync.cc"
# from [e56f12d1ea09ad60c675560199cb7819d258c025]
# to [cfd4bd5fa05d58c54c8ddf1cfe706120f6212e87]
#
# patch "packet.cc"
# from [130498edfbbd9c527c89e6e6c5c2d8d6193d3590]
# to [dfdefb2d2808183cd0312752b2ee1f840bace11c]
#
# patch "paths.cc"
# from [5680d2c2726d5894fe21cc4c7dd9f149ba1efb32]
# to [e780082307c87355306be40b824b8827f1db7d43]
#
# patch "rcs_file.cc"
# from [f5dde1d6e49f0472b11704c24d36a0bcb161f257]
# to [e6ecf0169b282a6f96e278227893eef9ca10680e]
#
# patch "rcs_import.cc"
# from [36d442b3ce162881956dffeb17d9ff88d70ea794]
# to [cc36f7773a0101a51850cc9f218b6f5dd6272a24]
#
# patch "refiner.cc"
# from [baa7581b2a14e8f76c65e90aa682bd54a44cb292]
# to [f2f64fe6722f3501ce1fea2d85516818070151e2]
#
# patch "revision.cc"
# from [e41a1f765fa1a8d68288a62f74da3215864ada44]
# to [ef6feacfdac32584040843bcd70f3bb66df67ad7]
#
# patch "roster.cc"
# from [5955f1339c8276c1ad2964abf5051b7d9cb13890]
# to [fde433622561c188f726b4b591388c070cc1e391]
#
# patch "roster_merge.cc"
# from [282aa6d4625b5276c3997aa6aca10faf15f69eea]
# to [ff845578375e028041eb8731d2f870ebfcad9eeb]
#
# patch "sanity.cc"
# from [d10487eb6479f58640483c72bb8c207903b0ade1]
# to [eb02236fb3eb72ac83d3b4a30cadad147aec9895]
#
# patch "schema_migration.cc"
# from [f820f10c5c4992df47354a9653d89a6de21c3a87]
# to [e26608a79f5d7cef3cc41da156dfc6d5f5f840db]
#
# patch "selectors.cc"
# from [a179c48e93ea4c6e98558ce1f4b76da04ae88e61]
# to [6e4fa17b861b28cc469b4adcdcdc58206ab2d654]
#
# patch "simplestring_xform.cc"
# from [b6d21fa059da420562b8dfea0e833b1806d7766d]
# to [dcf9e39b1a9c1bb565447a90f6068bdf82a7ee68]
#
# patch "ui.cc"
# from [180abc6a7a5e1e7c552abc05d8d1806aebcae786]
# to [cd43c58d97f3b15c45109e37c1fdad43ba84284a]
#
# patch "unit_tests.cc"
# from [9b3d3fd11212c735e093314f79e5fbd76977ebab]
# to [56038ad7f4260307ce03f9e4313720c59466f564]
#
# patch "vocab.cc"
# from [734ef82435e130f3ba3674573a142a8a980de91c]
# to [cd9e884668636ad1bb7cd1607d5d2fd0015fd307]
#
# patch "vocab_macros.hh"
# from [5f7434315d7ad936ca9bf09abe40899eb7e44d49]
# to [c82a27c1bc1c60037141f7e4bc26c00edc7d61a4]
#
# patch "work.cc"
# from [0d47dba0c23494d21067c094cca9cba8acd71f3b]
# to [afe3d7f65d252d2874181c2c43d50f95d51c8d61]
#
# patch "xdelta.cc"
# from [0dba3763bd06fe89479c0c91d649eaba2b9e37ba]
# to [0d2c397234e43b065c82f446ac4192803dec1146]
#
============================================================
--- ChangeLog 81e3045140bcbf832f6fe43481c287b7343f50a8
+++ ChangeLog effebb6d6ea8613ca6a0306794ceaf91df89128a
@@ -1,3 +1,66 @@
+2006-05-27 Derek Scherger
+
+ * annotate.cc:
+ * app_state.cc:
+ * automate.cc:
+ * basic_io.cc:
+ * cert.cc:
+ * charset.cc:
+ * cmd_automate.cc:
+ * cmd_db.cc:
+ * cmd_diff_log.cc:
+ * cmd_list.cc:
+ * cmd_merging.cc:
+ * cmd_netsync.cc:
+ * cmd_packet.cc:
+ * cmd_ws_commit.cc:
+ * commands.cc:
+ * constants.cc:
+ * crypto_tests.cc:
+ * cset.cc:
+ * database.cc:
+ * database_check.cc:
+ * diff_patch.cc:
+ * enumerator.cc:
+ * epoch.cc:
+ * file_io.cc:
+ * globish.cc:
+ * hmac.cc:
+ * inodeprint.cc:
+ * key_store.cc:
+ * keys.cc:
+ * lcs.cc:
+ * legacy.cc:
+ * lua.cc:
+ * lua_hooks.cc:
+ * main.cc:
+ * merge.cc:
+ * merkle_tree.cc:
+ * mkstemp.cc:
+ * monotone.cc:
+ * mt_version.cc:
+ * netcmd.cc:
+ * netsync.cc:
+ * packet.cc:
+ * paths.cc:
+ * rcs_file.cc:
+ * rcs_import.cc:
+ * refiner.cc:
+ * revision.cc:
+ * roster.cc:
+ * roster_merge.cc:
+ * sanity.cc:
+ * schema_migration.cc:
+ * selectors.cc:
+ * simplestring_xform.cc:
+ * ui.cc:
+ * unit_tests.cc:
+ * vocab.cc:
+ * vocab_macros.hh:
+ * work.cc:
+ * xdelta.cc:
+ namespace fixups as described in the ROADMAP
+
2006-05-26 Derek Scherger
* automate.cc (automate_get_manifest_of, automate_get_file):
============================================================
--- annotate.cc 5f284a607f1ef3dd5a7e192a58c396ce135ae9ff
+++ annotate.cc f77b679344341a4af199df9b59efd49b4676a6af
@@ -26,7 +26,21 @@
#include "cert.hh"
#include "ui.hh"
+using std::auto_ptr;
+using std::back_insert_iterator;
+using std::back_inserter;
+using std::cout;
+using std::deque;
+using std::endl;
+using std::make_pair;
+using std::map;
+using std::min;
+using std::set;
+using std::string;
+using std::vector;
+using boost::shared_ptr;
+
class annotate_lineage_mapping;
@@ -34,7 +48,7 @@
public:
annotate_context(file_id fid, app_state &app);
- boost::shared_ptr initial_lineage() const;
+ shared_ptr initial_lineage() const;
/// credit any uncopied lines (as recorded in copied_lines) to
/// rev, and reset copied_lines.
@@ -51,27 +65,27 @@
void dump(app_state & app) const;
- std::string get_line(int line_index) const { return file_lines[line_index]; }
+ string get_line(int line_index) const { return file_lines[line_index]; }
private:
- void build_revisions_to_annotations(app_state & app, std::map & revs_to_notations) const;
+ void build_revisions_to_annotations(app_state & app, map & revs_to_notations) const;
- std::vector file_lines;
- std::vector annotations;
+ vector file_lines;
+ vector annotations;
/// equivalent_lines[n] = m means that line n should be blamed to the same
/// revision as line m
- std::map equivalent_lines;
+ map equivalent_lines;
/// keep a count so we can tell quickly whether we can terminate
size_t annotated_lines_completed;
// elements of the set are indexes into the array of lines in the UDOI
// lineages add entries here when they notice that they copied a line from the UDOI
- std::set copied_lines;
+ set copied_lines;
// similarly, lineages add entries here for all the lines from the UDOI they know about that they didn't copy
- std::set touched_lines;
+ set touched_lines;
};
@@ -84,31 +98,31 @@
class annotate_lineage_mapping {
public:
annotate_lineage_mapping(const file_data &data);
- annotate_lineage_mapping(const std::vector &lines);
+ annotate_lineage_mapping(const vector &lines);
// debugging
//bool equal_interned (const annotate_lineage_mapping &rhs) const;
/// need a better name. does the work of setting copied bits in the context object.
- boost::shared_ptr
- build_parent_lineage(boost::shared_ptr acp, revision_id parent_rev, const file_data &parent_data) const;
+ shared_ptr
+ build_parent_lineage(shared_ptr acp, revision_id parent_rev, const file_data &parent_data) const;
- void merge(const annotate_lineage_mapping &other, const boost::shared_ptr &acp);
+ void merge(const annotate_lineage_mapping &other, const shared_ptr &acp);
- void credit_mapped_lines (boost::shared_ptr acp) const;
- void set_copied_all_mapped (boost::shared_ptr acp) const;
+ void credit_mapped_lines(shared_ptr acp) const;
+ void set_copied_all_mapped(shared_ptr acp) const;
private:
- void init_with_lines(const std::vector &lines);
+ void init_with_lines(const vector &lines);
static interner in; // FIX, testing hack
- std::vector file_interned;
+ vector file_interned;
// maps an index into the vector of lines for our current version of the file
// into an index into the vector of lines of the UDOI:
// eg. if the line file_interned[i] will turn into line 4 in the UDOI, mapping[i] = 4
- std::vector mapping;
+ vector mapping;
};
@@ -121,23 +135,23 @@
childrev -> parentrevN edges.
*/
struct annotate_node_work {
- annotate_node_work (boost::shared_ptr annotations_,
- boost::shared_ptr lineage_,
- revision_id revision_, node_id fid_)//, file_path node_fpath_)
+ annotate_node_work(shared_ptr annotations_,
+ shared_ptr lineage_,
+ revision_id revision_, node_id fid_)//, file_path node_fpath_)
: annotations(annotations_),
lineage(lineage_),
revision(revision_),
fid(fid_)//, node_fpath(node_fpath_)
{}
- annotate_node_work (const annotate_node_work &w)
+ annotate_node_work(const annotate_node_work &w)
: annotations(w.annotations),
lineage(w.lineage),
revision(w.revision),
fid(w.fid) //, node_fpath(w.node_fpath)
{}
- boost::shared_ptr annotations;
- boost::shared_ptr lineage;
+ shared_ptr annotations;
+ shared_ptr lineage;
revision_id revision;
//file_id node_fid;
node_id fid;
@@ -147,7 +161,7 @@
class lineage_merge_node {
public:
- typedef boost::shared_ptr splm;
+ typedef shared_ptr splm;
lineage_merge_node(const lineage_merge_node &m)
: work(m.work), incoming_edges(m.incoming_edges), completed_edges(m.completed_edges)
@@ -157,7 +171,7 @@
: work(wu), incoming_edges(incoming), completed_edges(1)
{}
- void merge(splm incoming, const boost::shared_ptr &acp)
+ void merge(splm incoming, const shared_ptr &acp)
{
work.lineage->merge(*incoming, acp); completed_edges++;
}
@@ -182,7 +196,7 @@
// initialize file_lines
file_data fpacked;
app.db.get_file_version(fid, fpacked);
- std::string encoding = constants::default_encoding; // FIXME
+ string encoding = constants::default_encoding; // FIXME
split_into_lines(fpacked.inner()(), encoding, file_lines);
L(FL("annotate_context::annotate_context initialized with %d file lines\n") % file_lines.size());
@@ -199,10 +213,10 @@
}
-boost::shared_ptr
+shared_ptr
annotate_context::initial_lineage() const
{
- boost::shared_ptr res(new annotate_lineage_mapping(file_lines));
+ shared_ptr res(new annotate_lineage_mapping(file_lines));
return res;
}
@@ -215,12 +229,12 @@
I(touched_lines.size() <= annotations.size());
// Find the lines that we touched but that no other parent copied.
- std::set credit_lines;
- std::set_difference(touched_lines.begin(), touched_lines.end(),
- copied_lines.begin(), copied_lines.end(),
- inserter(credit_lines, credit_lines.begin()));
+ set credit_lines;
+ set_difference(touched_lines.begin(), touched_lines.end(),
+ copied_lines.begin(), copied_lines.end(),
+ inserter(credit_lines, credit_lines.begin()));
- std::set::const_iterator i;
+ set::const_iterator i;
for (i = credit_lines.begin(); i != credit_lines.end(); i++) {
I(*i < annotations.size());
if (annotations[*i] == nullid) {
@@ -273,7 +287,7 @@
for (size_t i=0; i::const_iterator j = equivalent_lines.find(i);
+ map::const_iterator j = equivalent_lines.find(i);
if (j == equivalent_lines.end()) {
L(FL("annotate_equivalent_lines unable to find equivalent for line %d\n") % i);
}
@@ -295,26 +309,26 @@
}
-std::string cert_string_value (std::vector< revision > const & certs,
- const std::string & name,
- bool from_start, bool from_end,
- const std::string & sep)
+string cert_string_value(vector< revision > const & certs,
+ const string & name,
+ bool from_start, bool from_end,
+ const string & sep)
{
- for (std::vector < revision < cert > >::const_iterator i = certs.begin ();
+ for (vector < revision < cert > >::const_iterator i = certs.begin ();
i != certs.end (); ++i)
{
if (i->inner ().name () == name)
{
cert_value tv;
decode_base64 (i->inner ().value, tv);
- std::string::size_type f = 0;
- std::string::size_type l = std::string::npos;
+ string::size_type f = 0;
+ string::size_type l = string::npos;
if (from_start)
l = tv ().find_first_of (sep);
if (from_end)
{
f = tv ().find_last_of (sep);
- if (f == std::string::npos)
+ if (f == string::npos)
f = 0;
}
return tv ().substr (f, l);
@@ -327,13 +341,13 @@
void
annotate_context::build_revisions_to_annotations(app_state &app,
- std::map &revs_to_notations) const
+ map &revs_to_notations) const
{
I(annotations.size() == file_lines.size());
// build set of unique revisions present in annotations
- std::set seen;
- for (std::vector::const_iterator i = annotations.begin(); i != annotations.end(); i++)
+ set seen;
+ for (vector::const_iterator i = annotations.begin(); i != annotations.end(); i++)
{
seen.insert(*i);
}
@@ -341,16 +355,16 @@
size_t max_note_length = 0;
// build revision -> annotation string mapping
- for (std::set::const_iterator i = seen.begin(); i != seen.end(); i++)
+ for (set::const_iterator i = seen.begin(); i != seen.end(); i++)
{
- std::vector< revision > certs;
+ vector< revision > certs;
app.db.get_revision_certs(*i, certs);
erase_bogus_certs(certs, app);
- std::string author(cert_string_value(certs, author_cert_name, true, false, "@< "));
- std::string date(cert_string_value(certs, date_cert_name, true, false, "T"));
+ string author(cert_string_value(certs, author_cert_name, true, false, "@< "));
+ string date(cert_string_value(certs, date_cert_name, true, false, "T"));
- std::string result;
+ string result;
result.append((*i).inner ()().substr(0, 8));
result.append(".. by ");
result.append(author);
@@ -363,10 +377,10 @@
}
// justify annotation strings
- for (std::map::iterator i = revs_to_notations.begin(); i != revs_to_notations.end(); i++)
+ for (map::iterator i = revs_to_notations.begin(); i != revs_to_notations.end(); i++)
{
size_t l = i->second.size();
- i->second.insert(std::string::size_type(0), max_note_length - l, ' ');
+ i->second.insert(string::size_type(0), max_note_length - l, ' ');
}
}
@@ -377,13 +391,13 @@
revision_id nullid;
I(annotations.size() == file_lines.size());
- std::map revs_to_notations;
- std::string empty_note;
+ map revs_to_notations;
+ string empty_note;
if (global_sanity.brief)
{
build_revisions_to_annotations(app, revs_to_notations);
size_t max_note_length = revs_to_notations.begin()->second.size();
- empty_note.insert(std::string::size_type(0), max_note_length - 2, ' ');
+ empty_note.insert(string::size_type(0), max_note_length - 2, ' ');
}
revision_id lastid = nullid;
@@ -393,13 +407,13 @@
if (global_sanity.brief)
{
if (lastid == annotations[i])
- std::cout << empty_note << ": " << file_lines[i] << std::endl;
+ cout << empty_note << ": " << file_lines[i] << endl;
else
- std::cout << revs_to_notations[annotations[i]] << file_lines[i] << std::endl;
+ cout << revs_to_notations[annotations[i]] << file_lines[i] << endl;
lastid = annotations[i];
}
else
- std::cout << annotations[i] << ": " << file_lines[i] << std::endl;
+ cout << annotations[i] << ": " << file_lines[i] << endl;
}
}
@@ -407,14 +421,14 @@
annotate_lineage_mapping::annotate_lineage_mapping(const file_data &data)
{
// split into lines
- std::vector lines;
- std::string encoding = constants::default_encoding; // FIXME
+ vector lines;
+ string encoding = constants::default_encoding; // FIXME
split_into_lines (data.inner()().data(), encoding, lines);
init_with_lines(lines);
}
-annotate_lineage_mapping::annotate_lineage_mapping(const std::vector &lines)
+annotate_lineage_mapping::annotate_lineage_mapping(const vector &lines)
{
init_with_lines(lines);
}
@@ -431,7 +445,7 @@
result = false;
}
- size_t limit = std::min(file_interned.size(), rhs.file_interned.size());
+ size_t limit = min(file_interned.size(), rhs.file_interned.size());
for (size_t i=0; i &lines)
+annotate_lineage_mapping::init_with_lines(const vector &lines)
{
file_interned.clear();
file_interned.reserve(lines.size());
@@ -453,7 +467,7 @@
mapping.reserve(lines.size());
int count;
- std::vector::const_iterator i;
+ vector::const_iterator i;
for (count=0, i = lines.begin(); i != lines.end(); i++, count++) {
file_interned.push_back(in.intern(*i));
mapping.push_back(count);
@@ -462,29 +476,29 @@
}
-boost::shared_ptr
-annotate_lineage_mapping::build_parent_lineage (boost::shared_ptr acp,
- revision_id parent_rev,
- const file_data &parent_data) const
+shared_ptr
+annotate_lineage_mapping::build_parent_lineage(shared_ptr acp,
+ revision_id parent_rev,
+ const file_data &parent_data) const
{
bool verbose = false;
- boost::shared_ptr parent_lineage(new annotate_lineage_mapping(parent_data));
+ shared_ptr parent_lineage(new annotate_lineage_mapping(parent_data));
- std::vector lcs;
- std::back_insert_iterator< std::vector > bii(lcs);
+ vector lcs;
+ back_insert_iterator< vector > bii(lcs);
longest_common_subsequence(file_interned.begin(),
file_interned.end(),
parent_lineage->file_interned.begin(),
parent_lineage->file_interned.end(),
- std::min(file_interned.size(), parent_lineage->file_interned.size()),
- std::back_inserter(lcs));
+ min(file_interned.size(), parent_lineage->file_interned.size()),
+ back_inserter(lcs));
if (verbose)
L(FL("build_parent_lineage: file_lines.size() == %d, parent.file_lines.size() == %d, lcs.size() == %d\n")
% file_interned.size() % parent_lineage->file_interned.size() % lcs.size());
// do the copied lines thing for our annotate_context
- std::vector lcs_src_lines;
+ vector lcs_src_lines;
lcs_src_lines.resize(lcs.size());
size_t i, j;
i = j = 0;
@@ -544,8 +558,8 @@
void
-annotate_lineage_mapping::merge (const annotate_lineage_mapping &other,
- const boost::shared_ptr &acp)
+annotate_lineage_mapping::merge(const annotate_lineage_mapping &other,
+ const shared_ptr &acp)
{
I(file_interned.size() == other.file_interned.size());
I(mapping.size() == other.mapping.size());
@@ -569,9 +583,9 @@
}
void
-annotate_lineage_mapping::credit_mapped_lines (boost::shared_ptr acp) const
+annotate_lineage_mapping::credit_mapped_lines (shared_ptr acp) const
{
- std::vector::const_iterator i;
+ vector::const_iterator i;
for (i=mapping.begin(); i != mapping.end(); i++) {
acp->set_touched(*i);
}
@@ -579,9 +593,9 @@
void
-annotate_lineage_mapping::set_copied_all_mapped (boost::shared_ptr acp) const
+annotate_lineage_mapping::set_copied_all_mapped (shared_ptr acp) const
{
- std::vector::const_iterator i;
+ vector::const_iterator i;
for (i=mapping.begin(); i != mapping.end(); i++) {
acp->set_copied(*i);
}
@@ -591,20 +605,20 @@
static void
do_annotate_node (const annotate_node_work &work_unit,
app_state &app,
- std::deque &nodes_to_process,
- std::set &nodes_complete,
- const std::map &paths_to_nodes,
- std::map &pending_merge_nodes)
+ deque &nodes_to_process,
+ set &nodes_complete,
+ const map &paths_to_nodes,
+ map &pending_merge_nodes)
{
L(FL("do_annotate_node for node %s\n") % work_unit.revision);
I(nodes_complete.find(work_unit.revision) == nodes_complete.end());
- // nodes_seen.insert(std::make_pair(work_unit.revision, work_unit.lineage));
+ // nodes_seen.insert(make_pair(work_unit.revision, work_unit.lineage));
roster_t roster;
marking_map markmap;
app.db.get_roster(work_unit.revision, roster, markmap);
marking_t marks;
- std::map::const_iterator mmi = markmap.find(work_unit.fid);
+ map::const_iterator mmi = markmap.find(work_unit.fid);
I(mmi != markmap.end());
marks = mmi->second;
@@ -617,7 +631,7 @@
return;
}
- std::set parents;
+ set parents;
// If we have content-marks which are *not* equal to the current rev,
// we can jump back to them directly. If we have only a content-mark
@@ -637,7 +651,7 @@
size_t added_in_parent_count = 0;
- for (std::set::const_iterator i = parents.begin();
+ for (set::const_iterator i = parents.begin();
i != parents.end(); i++)
{
revision_id parent_revision = *i;
@@ -661,7 +675,7 @@
file_t file_in_child = downcast_to_file_t(roster.get_node(work_unit.fid));
file_t file_in_parent = downcast_to_file_t(parent_roster.get_node(work_unit.fid));
- boost::shared_ptr parent_lineage;
+ shared_ptr parent_lineage;
if (file_in_parent->content == file_in_child->content)
{
@@ -681,7 +695,7 @@
// If this parent has not yet been queued for processing, create the
// work unit for it.
- std::map::iterator lmn
+ map::iterator lmn
= pending_merge_nodes.find(parent_revision);
if (lmn == pending_merge_nodes.end())
@@ -693,13 +707,13 @@
parent_revision,
work_unit.fid);
- std::map::const_iterator ptn
+ map::const_iterator ptn
= paths_to_nodes.find(parent_revision);
if (ptn->second > 1)
{
lineage_merge_node nmn(newunit, ptn->second);
- pending_merge_nodes.insert(std::make_pair(parent_revision, nmn));
+ pending_merge_nodes.insert(make_pair(parent_revision, nmn));
L(FL("put new merge node on pending_merge_nodes for parent %s\n")
% parent_revision);
// just checking...
@@ -741,9 +755,9 @@
void
-find_ancestors(app_state &app, revision_id rid, std::map &paths_to_nodes)
+find_ancestors(app_state &app, revision_id rid, map &paths_to_nodes)
{
- std::vector frontier;
+ vector frontier;
frontier.push_back(rid);
while (!frontier.empty())
@@ -751,16 +765,16 @@
revision_id rid = frontier.back();
frontier.pop_back();
if(!null_id(rid)) {
- std::set parents;
+ set parents;
app.db.get_revision_parents(rid, parents);
- for (std::set::const_iterator i = parents.begin();
+ for (set::const_iterator i = parents.begin();
i != parents.end(); ++i)
{
- std::map::iterator found = paths_to_nodes.find(*i);
+ map::iterator found = paths_to_nodes.find(*i);
if (found == paths_to_nodes.end())
{
frontier.push_back(*i);
- paths_to_nodes.insert(std::make_pair(*i, 1));
+ paths_to_nodes.insert(make_pair(*i, 1));
}
else
{
@@ -776,20 +790,20 @@
{
L(FL("annotating file %s with content %s in revision %s\n") % file_node->self % file_node->content % rid);
- boost::shared_ptr acp(new annotate_context(file_node->content, app));
- boost::shared_ptr lineage = acp->initial_lineage();
+ shared_ptr acp(new annotate_context(file_node->content, app));
+ shared_ptr lineage = acp->initial_lineage();
- std::set nodes_complete;
- std::map paths_to_nodes;
- std::map pending_merge_nodes;
+ set nodes_complete;
+ map paths_to_nodes;
+ map pending_merge_nodes;
find_ancestors(app, rid, paths_to_nodes);
// build node work unit
- std::deque nodes_to_process;
+ deque nodes_to_process;
annotate_node_work workunit(acp, lineage, rid, file_node->self); //, fpath);
nodes_to_process.push_back(workunit);
- std::auto_ptr revs_ticker(new ticker(N_("revs done"), "r", 1));
+ auto_ptr revs_ticker(new ticker(N_("revs done"), "r", 1));
revs_ticker->set_total(paths_to_nodes.size() + 1);
while (nodes_to_process.size() && !acp->is_complete())
{
============================================================
--- app_state.cc 6a8be1d3051e8e84464730d9d4f60cc9abca098d
+++ app_state.cc 55a92b8e0df6ce29b219a3efef760983fa57c9ae
@@ -17,6 +17,10 @@
#include "work.hh"
#include "platform.hh"
+using std::exception;
+using std::map;
+using std::string;
+
// copyright (C) 2002, 2003 graydon hoare
// all rights reserved.
// licensed to the public under the terms of the GNU GPL (>= 2)
@@ -42,7 +46,7 @@
db.set_app(this);
lua.set_app(this);
keys.set_key_dir(confdir / "keys");
- set_prog_name(utf8(std::string("mtn")));
+ set_prog_name(utf8(string("mtn")));
}
app_state::~app_state()
@@ -58,7 +62,7 @@
bool
app_state::is_explicit_option(int option_id) const
{
- std::map::const_iterator i = explicit_option_map.find(option_id);
+ map::const_iterator i = explicit_option_map.find(option_id);
if (i == explicit_option_map.end()) return false;
return i->second;
}
@@ -113,7 +117,7 @@
}
void
-app_state::require_workspace(std::string const & explanation)
+app_state::require_workspace(string const & explanation)
{
N(found_workspace,
F("workspace required but not found%s%s")
@@ -239,14 +243,14 @@
// form 20000101T120000, but not "extended" ISO times, of the form
// 2000-01-01T12:00:00. So do something stupid to convert one to the
// other.
- std::string tmp = d();
- std::string::size_type pos = 0;
+ string tmp = d();
+ string::size_type pos = 0;
while ((pos = tmp.find_first_of("-:")) != string::npos)
tmp.erase(pos, 1);
date = boost::posix_time::from_iso_string(tmp);
date_set = true;
}
- catch (std::exception &e)
+ catch (exception &e)
{
N(false, F("failed to parse date string '%s': %s") % d % e.what());
}
@@ -412,7 +416,7 @@
read_options_map(dat, options);
}
}
- catch(std::exception & e)
+ catch(exception & e)
{
W(F("Failed to read options file %s") % o_path);
}
@@ -429,7 +433,7 @@
write_options_map(dat, options);
write_data(o_path, dat);
}
- catch(std::exception & e)
+ catch(exception & e)
{
W(F("Failed to write options file %s") % o_path);
}
============================================================
--- automate.cc 8aa16555b3ed2bbd87bbdb395a2d23a790803f19
+++ automate.cc 7d53f42255a88e41a2b1bd27c85aff62ecad823d
@@ -28,8 +28,26 @@
#include "packet.hh"
#include "cert.hh"
-static std::string const interface_version = "2.1";
+using std::allocator;
+using std::basic_stringbuf;
+using std::basic_ios;
+using std::char_traits;
+using std::endl;
+using std::inserter;
+using std::make_pair;
+using std::map;
+using std::multimap;
+using std::ostream;
+using std::ostringstream;
+using std::pair;
+using std::set;
+using std::sort;
+using std::streamsize;
+using std::string;
+using std::vector;
+static string const interface_version = "2.1";
+
// Name: interface_version
// Arguments: none
// Added in: 0.0
@@ -40,15 +58,15 @@
// "[0-9]+\.[0-9]+\n".
// Error conditions: None.
static void
-automate_interface_version(std::vector args,
- std::string const & help_name,
+automate_interface_version(vector args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
if (args.size() != 0)
throw usage(help_name);
- output << interface_version << std::endl;
+ output << interface_version << endl;
}
// Name: heads
@@ -61,10 +79,10 @@
// Error conditions: If the branch does not exist, prints nothing. (There are
// no heads.)
static void
-automate_heads(std::vector args,
- std::string const & help_name,
+automate_heads(vector args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
if (args.size() > 1)
throw usage(help_name);
@@ -73,10 +91,10 @@
// branchname was explicitly given, use that
app.set_branch(idx(args, 0));
}
- std::set heads;
+ set heads;
get_branch_heads(app.branch_name(), app, heads);
- for (std::set::const_iterator i = heads.begin(); i != heads.end(); ++i)
- output << (*i).inner()() << std::endl;
+ for (set::const_iterator i = heads.begin(); i != heads.end(); ++i)
+ output << (*i).inner()() << endl;
}
// Name: ancestors
@@ -89,17 +107,17 @@
// Error conditions: If any of the revisions do not exist, prints nothing to
// stdout, prints an error message to stderr, and exits with status 1.
static void
-automate_ancestors(std::vector args,
- std::string const & help_name,
- app_state & app,
- std::ostream & output)
+automate_ancestors(vector args,
+ string const & help_name,
+ app_state & app,
+ ostream & output)
{
if (args.size() == 0)
throw usage(help_name);
- std::set ancestors;
- std::vector frontier;
- for (std::vector::const_iterator i = args.begin(); i != args.end(); ++i)
+ set ancestors;
+ vector frontier;
+ for (vector::const_iterator i = args.begin(); i != args.end(); ++i)
{
revision_id rid((*i)());
N(app.db.revision_exists(rid), F("No such revision %s") % rid);
@@ -110,9 +128,9 @@
revision_id rid = frontier.back();
frontier.pop_back();
if(!null_id(rid)) {
- std::set parents;
+ set parents;
app.db.get_revision_parents(rid, parents);
- for (std::set::const_iterator i = parents.begin();
+ for (set::const_iterator i = parents.begin();
i != parents.end(); ++i)
{
if (ancestors.find(*i) == ancestors.end())
@@ -123,10 +141,10 @@
}
}
}
- for (std::set::const_iterator i = ancestors.begin();
+ for (set::const_iterator i = ancestors.begin();
i != ancestors.end(); ++i)
if (!null_id(*i))
- output << (*i).inner()() << std::endl;
+ output << (*i).inner()() << endl;
}
@@ -140,17 +158,17 @@
// Error conditions: If any of the revisions do not exist, prints nothing to
// stdout, prints an error message to stderr, and exits with status 1.
static void
-automate_descendents(std::vector args,
- std::string const & help_name,
+automate_descendents(vector args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
if (args.size() == 0)
throw usage(help_name);
- std::set descendents;
- std::vector frontier;
- for (std::vector::const_iterator i = args.begin(); i != args.end(); ++i)
+ set descendents;
+ vector frontier;
+ for (vector::const_iterator i = args.begin(); i != args.end(); ++i)
{
revision_id rid((*i)());
N(app.db.revision_exists(rid), F("No such revision %s") % rid);
@@ -160,9 +178,9 @@
{
revision_id rid = frontier.back();
frontier.pop_back();
- std::set<revision_id> children;
+ set<revision_id> children;
app.db.get_revision_children(rid, children);
- for (std::set<revision_id>::const_iterator i = children.begin();
+ for (set<revision_id>::const_iterator i = children.begin();
i != children.end(); ++i)
{
if (descendents.find(*i) == descendents.end())
@@ -172,9 +190,9 @@
}
}
}
- for (std::set<revision_id>::const_iterator i = descendents.begin();
+ for (set<revision_id>::const_iterator i = descendents.begin();
i != descendents.end(); ++i)
- output << (*i).inner()() << std::endl;
+ output << (*i).inner()() << endl;
}
@@ -192,21 +210,21 @@
// Error conditions: If any of the revisions do not exist, prints nothing to
// stdout, prints an error message to stderr, and exits with status 1.
static void
-automate_erase_ancestors(std::vector<utf8> args,
- std::string const & help_name,
+automate_erase_ancestors(vector<utf8> args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
- std::set<revision_id> revs;
- for (std::vector<utf8>::const_iterator i = args.begin(); i != args.end(); ++i)
+ set<revision_id> revs;
+ for (vector<utf8>::const_iterator i = args.begin(); i != args.end(); ++i)
{
revision_id rid((*i)());
N(app.db.revision_exists(rid), F("No such revision %s") % rid);
revs.insert(rid);
}
erase_ancestors(revs, app);
- for (std::set<revision_id>::const_iterator i = revs.begin(); i != revs.end(); ++i)
- output << (*i).inner()() << std::endl;
+ for (set<revision_id>::const_iterator i = revs.begin(); i != revs.end(); ++i)
+ output << (*i).inner()() << endl;
}
// Name: attributes
@@ -219,10 +237,10 @@
// or a list of attributes if a file name provided.
// Error conditions: If the file name has no attributes, prints nothing.
static void
-automate_attributes(std::vector<utf8> args,
- std::string const & help_name,
+automate_attributes(vector<utf8> args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
if (args.size() > 1)
throw usage(help_name);
@@ -244,7 +262,7 @@
for (full_attr_map_t::const_iterator i = n->attrs.begin();
i != n->attrs.end(); ++i)
if (i->second.first)
- output << i->first << std::endl;
+ output << i->first << endl;
}
}
else
@@ -256,7 +274,7 @@
{
split_path path;
current.get_name(i->first, path);
- output << file_path(path) << std::endl;
+ output << file_path(path) << endl;
}
}
}
@@ -273,23 +291,23 @@
// Error conditions: If any of the revisions do not exist, prints nothing to
// stdout, prints an error message to stderr, and exits with status 1.
static void
-automate_toposort(std::vector<utf8> args,
- std::string const & help_name,
+automate_toposort(vector<utf8> args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
- std::set<revision_id> revs;
- for (std::vector<utf8>::const_iterator i = args.begin(); i != args.end(); ++i)
+ set<revision_id> revs;
+ for (vector<utf8>::const_iterator i = args.begin(); i != args.end(); ++i)
{
revision_id rid((*i)());
N(app.db.revision_exists(rid), F("No such revision %s") % rid);
revs.insert(rid);
}
- std::vector<revision_id> sorted;
+ vector<revision_id> sorted;
toposort(revs, sorted, app);
- for (std::vector<revision_id>::const_iterator i = sorted.begin();
+ for (vector<revision_id>::const_iterator i = sorted.begin();
i != sorted.end(); ++i)
- output << (*i).inner()() << std::endl;
+ output << (*i).inner()() << endl;
}
// Name: ancestry_difference
@@ -309,17 +327,17 @@
// Error conditions: If any of the revisions do not exist, prints nothing to
// stdout, prints an error message to stderr, and exits with status 1.
static void
-automate_ancestry_difference(std::vector<utf8> args,
- std::string const & help_name,
+automate_ancestry_difference(vector<utf8> args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
if (args.size() == 0)
throw usage(help_name);
revision_id a;
- std::set<revision_id> bs;
- std::vector<utf8>::const_iterator i = args.begin();
+ set<revision_id> bs;
+ vector<utf8>::const_iterator i = args.begin();
a = revision_id((*i)());
N(app.db.revision_exists(a), F("No such revision %s") % a);
for (++i; i != args.end(); ++i)
@@ -328,14 +346,14 @@
N(app.db.revision_exists(b), F("No such revision %s") % b);
bs.insert(b);
}
- std::set<revision_id> ancestors;
+ set<revision_id> ancestors;
ancestry_difference(a, bs, ancestors, app);
- std::vector<revision_id> sorted;
+ vector<revision_id> sorted;
toposort(ancestors, sorted, app);
- for (std::vector<revision_id>::const_iterator i = sorted.begin();
+ for (vector<revision_id>::const_iterator i = sorted.begin();
i != sorted.end(); ++i)
- output << (*i).inner()() << std::endl;
+ output << (*i).inner()() << endl;
}
// Name: leaves
@@ -352,24 +370,24 @@
// newline. Revision ids are printed in alphabetically sorted order.
// Error conditions: None.
static void
-automate_leaves(std::vector<utf8> args,
- std::string const & help_name,
- app_state & app,
- std::ostream & output)
+automate_leaves(vector<utf8> args,
+ string const & help_name,
+ app_state & app,
+ ostream & output)
{
if (args.size() != 0)
throw usage(help_name);
// this might be more efficient in SQL, but for now who cares.
- std::set<revision_id> leaves;
+ set<revision_id> leaves;
app.db.get_revision_ids(leaves);
- std::multimap<revision_id, revision_id> graph;
+ multimap<revision_id, revision_id> graph;
app.db.get_revision_ancestry(graph);
- for (std::multimap<revision_id, revision_id>::const_iterator i = graph.begin();
+ for (multimap<revision_id, revision_id>::const_iterator i = graph.begin();
i != graph.end(); ++i)
leaves.erase(i->first);
- for (std::set<revision_id>::const_iterator i = leaves.begin(); i != leaves.end(); ++i)
- output << (*i).inner()() << std::endl;
+ for (set<revision_id>::const_iterator i = leaves.begin(); i != leaves.end(); ++i)
+ output << (*i).inner()() << endl;
}
// Name: parents
@@ -383,21 +401,21 @@
// Error conditions: If the revision does not exist, prints nothing to stdout,
// prints an error message to stderr, and exits with status 1.
static void
-automate_parents(std::vector<utf8> args,
- std::string const & help_name,
+automate_parents(vector<utf8> args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
if (args.size() != 1)
throw usage(help_name);
revision_id rid(idx(args, 0)());
N(app.db.revision_exists(rid), F("No such revision %s") % rid);
- std::set<revision_id> parents;
+ set<revision_id> parents;
app.db.get_revision_parents(rid, parents);
- for (std::set<revision_id>::const_iterator i = parents.begin();
+ for (set<revision_id>::const_iterator i = parents.begin();
i != parents.end(); ++i)
if (!null_id(*i))
- output << (*i).inner()() << std::endl;
+ output << (*i).inner()() << endl;
}
// Name: children
@@ -411,21 +429,21 @@
// Error conditions: If the revision does not exist, prints nothing to stdout,
// prints an error message to stderr, and exits with status 1.
static void
-automate_children(std::vector<utf8> args,
- std::string const & help_name,
+automate_children(vector<utf8> args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
if (args.size() != 1)
throw usage(help_name);
revision_id rid(idx(args, 0)());
N(app.db.revision_exists(rid), F("No such revision %s") % rid);
- std::set<revision_id> children;
+ set<revision_id> children;
app.db.get_revision_children(rid, children);
- for (std::set<revision_id>::const_iterator i = children.begin();
+ for (set<revision_id>::const_iterator i = children.begin();
i != children.end(); ++i)
if (!null_id(*i))
- output << (*i).inner()() << std::endl;
+ output << (*i).inner()() << endl;
}
// Name: graph
@@ -449,40 +467,40 @@
// within each line are alphabetically sorted.
// Error conditions: None.
static void
-automate_graph(std::vector<utf8> args,
- std::string const & help_name,
+automate_graph(vector<utf8> args,
+ string const & help_name,
app_state & app,
- std::ostream & output)
+ ostream & output)
{
if (args.size() != 0)
throw usage(help_name);
- std::multimap<revision_id, revision_id> edges_mmap;
- std::map<revision_id, std::set<revision_id> > child_to_parents;
+ multimap<revision_id, revision_id> edges_mmap;
+ map<revision_id, set<revision_id> > child_to_parents;
app.db.get_revision_ancestry(edges_mmap);
- for (std::multimap<revision_id, revision_id>::const_iterator i = edges_mmap.begin();
+ for (multimap<revision_id, revision_id>::const_iterator i = edges_mmap.begin();
i != edges_mmap.end(); ++i)
{
if (child_to_parents.find(i->second) == child_to_parents.end())
- child_to_parents.insert(std::make_pair(i->second, std::set<revision_id>()));
+ child_to_parents.insert(make_pair(i->second, set<revision_id>()));
if (null_id(i->first))
continue;
- std::map<revision_id, std::set<revision_id> >::iterator
+ map<revision_id, set<revision_id> >::iterator
j = child_to_parents.find(i->second);
I(j->first == i->second);
j->second.insert(i->first);
}
- for (std::map<revision_id, std::set<revision_id> >::const_iterator i = child_to_parents.begin();
+ for (map