summaryrefslogtreecommitdiff
path: root/test/graph
diff options
context:
space:
mode:
authormzuenni <mzuenni@users.noreply.github.com>2024-07-28 22:54:40 +0200
committerGitHub <noreply@github.com>2024-07-28 22:54:40 +0200
commit8d11c6c8213f46f0fa19826917c255edd5d43cb1 (patch)
tree96d75baff33d5a04b5a60f1a41f514a26c716874 /test/graph
parent8c33b4e0d3030cfed17fc64b4fe41133339f6d87 (diff)
Test (#4)
* update * moved content in subdir * rename file * add test setup * add test setup * add github action * automatically test all cpp files * timeout after 10s * set ulimit and don't zero memory * test build pdf * install latexmk * update * update * ngerman * fonts * removed old code * add first test * added tests * test in sorted order * more tests * simplified test * more tests * fix suffix tree * fixes and improvements * done ust lst directly * fix swap * add links to pdf * fix constants * add primorial * add comment * various improvements * more tests * added missing stuff * more tests * fix tests * more tests * more tests * more tests * fix recursion? * test trie * more tests * only use python temporarily for listings * only use python temporarily for listings * more tests * fix longestCommonSubstring * more tests * more tests * made code more similar * fix? * more tests * more tests * more tests * add ahoCorasick test + limit 4GB stack size * more tests * fix test * add additional test * more tests * more tests * fix? * better fix * fix virtual tree * more tests * more tests * recursive closest pair * more tests * decrease limit * new tests * more tests * fix name * more tests * add test * new test * more tests * more tests * more tests * more tests * new test and content * new code * new code * larger tests * fix and test * new test * new test * update pdf * remove comments * new test * more tests * more testcases * more tests * increased limit * more tests * more tests * more tests * new tests * more tests * shortened code * new test * add basic tests for bigint * more tests * removed old files * new test * ignore some files * more auto more ccw * fix test * more tests * fix * new tests * more tests * more tests * stronger test * actually verify delaunay... * more tests * fix header * more tests * run tests parallel? * test parallel? * add --missing * separate workflows * test * is the pdf checked? 
* separate workflows * fix workflow * more workflows --------- Co-authored-by: Yidi <noob999noob999@gmail.com>
Diffstat (limited to 'test/graph')
-rw-r--r--test/graph/2sat.cpp133
-rw-r--r--test/graph/LCA_sparse.cpp63
-rw-r--r--test/graph/TSP.cpp67
-rw-r--r--test/graph/articulationPoints.bcc.cpp78
-rw-r--r--test/graph/articulationPoints.bridges.cpp64
-rw-r--r--test/graph/articulationPoints.cpp85
-rw-r--r--test/graph/bellmannFord.cpp70
-rw-r--r--test/graph/bitonicTSP.cpp49
-rw-r--r--test/graph/bitonicTSPsimple.cpp49
-rw-r--r--test/graph/blossom.cpp76
-rw-r--r--test/graph/bronKerbosch.cpp73
-rw-r--r--test/graph/centroid.cpp77
-rw-r--r--test/graph/cycleCounting.cpp79
-rw-r--r--test/graph/dijkstra.cpp64
-rw-r--r--test/graph/dinicScaling.cpp61
-rw-r--r--test/graph/euler.cpp87
-rw-r--r--test/graph/floydWarshall.cpp90
-rw-r--r--test/graph/havelHakimi.cpp65
-rw-r--r--test/graph/hopcroftKarp.cpp74
-rw-r--r--test/graph/kruskal.cpp91
-rw-r--r--test/graph/matching.cpp62
-rw-r--r--test/graph/maxCarBiMatch.cpp74
-rw-r--r--test/graph/maxWeightBipartiteMatching.cpp59
-rw-r--r--test/graph/minCostMaxFlow.cpp68
-rw-r--r--test/graph/pushRelabel.cpp61
-rw-r--r--test/graph/scc.cpp92
-rw-r--r--test/graph/stoerWagner.cpp81
-rw-r--r--test/graph/treeIsomorphism.cpp126
28 files changed, 2118 insertions, 0 deletions
diff --git a/test/graph/2sat.cpp b/test/graph/2sat.cpp
new file mode 100644
index 0000000..fc3186e
--- /dev/null
+++ b/test/graph/2sat.cpp
@@ -0,0 +1,133 @@
+#include "../util.h"
+#include <graph/scc.cpp>
+#define static vector<vector<int>> adj; static // hacky...
+#include <graph/2sat.cpp>
+#undef static
+#undef adj
+
+struct RandomClause {
+ int a, b;
+ int type;
+ RandomClause(int n) :
+ a(Random::integer<int>(0, 2*n)),
+ b(Random::integer<int>(0, 2*n)),
+ type(Random::integer<int>(0, 8)) {}
+
+ bool eval(vector<int>& sol) const {
+ bool ba = sol[a];
+ bool bb = sol[b];
+ if (type == 0) return !ba || bb;
+ if (type == 1) return ba == bb;
+ if (type == 2) return ba || bb;
+ if (type == 3) return ba != bb;
+ if (type == 4) return ba && bb;
+ if (type == 5) return !(ba && bb);
+
+ if (type == 6) return ba;
+ if (type == 7) return !ba;
+ return false;
+ }
+
+ void add(sat2& sat) const {
+ int va = a;
+ int vb = b;
+ if (type == 0) sat.addImpl(va, vb);
+ if (type == 1) sat.addEquiv(va, vb);
+ if (type == 2) sat.addOr(va, vb);
+ if (type == 3) sat.addXor(va, vb);
+ if (type == 4) sat.addAnd(va, vb);
+ if (type == 5) sat.addNand(va, vb);
+
+ if (type == 6) sat.addTrue(va);
+ if (type == 7) sat.addFalse(va);
+ }
+
+ friend ostream& operator<<(ostream& os, const RandomClause& c) {
+ if (c.a & 1) os << "-";
+ os << (c.a >> 1);
+ if (c.type == 0) os << "=>";
+ if (c.type == 1) os << "==";
+ if (c.type == 2) os << "or";
+ if (c.type == 3) os << "xor";
+ if (c.type == 4) os << "and";
+ if (c.type == 5) os << "nand";
+
+ if (c.type == 6) return os;
+ if (c.type == 7) return os << "==F";
+
+ if (c.b & 1) os << "-";
+ os << (c.b >> 1);
+ return os;
+ }
+};
+
+bool naive(int n, const vector<RandomClause>& clauses) {
+ for (ll i = 0; i < (1ll << n); i++) {
+ vector<int> tmp(2*n);
+ for (ll j = 0; j < n; j++) {
+ tmp[(2*j) + ((i >> j) & 1)] = 1;
+ }
+ bool ok = true;
+ for (auto& c : clauses) ok &= c.eval(tmp);
+ if (ok) return true;
+ }
+ return false;
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(1, 12);
+ int m = Random::integer<int>(0, 30);
+
+ vector<RandomClause> clauses;
+ for (int i = 0; i < m; i++) clauses.emplace_back(n);
+
+ sat2 sat(n);
+ for (auto& c : clauses) c.add(sat);
+ adj = sat.adj;
+
+ bool got = sat.solve();
+ bool expected = naive(n, clauses);
+
+ if (got) {
+ for (int i = 0; i < 2*n; i+=2) {
+ if (sat.sol[i] < 0) cerr << "error: invalid vars" << FAIL;
+ if (sat.sol[i+1] < 0) cerr << "error: invalid vars" << FAIL;
+ if (sat.sol[i] == sat.sol[i+1]) cerr << "error: inconsistent vars" << FAIL;
+ }
+ for (auto& c : clauses) {
+ if (!c.eval(sat.sol)) {
+ cerr << "error: inconsistent" << FAIL;
+ }
+ }
+ }
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 200'000;
+constexpr int M = 500'000;
+void performance_test() {
+ timer t;
+ vector<RandomClause> clauses;
+ for (int i = 0; i < M; i++) clauses.emplace_back(N);
+ t.start();
+ sat2 sat(N);
+ for (auto& c : clauses) c.add(sat);
+ t.stop();
+ adj = sat.adj;
+ t.start();
+ hash_t hash = sat.solve();
+ t.stop();
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/LCA_sparse.cpp b/test/graph/LCA_sparse.cpp
new file mode 100644
index 0000000..f6eb345
--- /dev/null
+++ b/test/graph/LCA_sparse.cpp
@@ -0,0 +1,63 @@
+#include "../util.h"
+#include <datastructures/sparseTable.cpp>
+#include <graph/LCA_sparse.cpp>
+namespace expected {
+#include <graph/hld.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ Graph<NoData> g(n);
+ g.tree();
+
+ vector<vector<int>> adj(n);
+ g.forEdges([&](int a, int b){
+ adj[a].push_back(b);
+ adj[b].push_back(a);
+ });
+
+ LCA lca;
+ lca.init(adj, 0);
+
+ expected::adj = adj;
+ expected::init();
+
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j <= i; j++) {
+ auto got = lca.getLCA(i, j);
+ auto expected = expected::get_lca(i, j);
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ }
+ }
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 1'000'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.tree();
+ vector<vector<int>> adj(N);
+ g.forEdges([&](int a, int b){
+ adj[a].push_back(b);
+ adj[b].push_back(a);
+ });
+
+ hash_t hash = 0;
+ t.start();
+ LCA lca;
+ lca.init(adj, 0);
+ for (int i = 1; i < N; i++) hash += lca.getLCA(i-1, i);
+ t.stop();
+ if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/TSP.cpp b/test/graph/TSP.cpp
new file mode 100644
index 0000000..f9aab2e
--- /dev/null
+++ b/test/graph/TSP.cpp
@@ -0,0 +1,67 @@
+#include "../util.h"
+struct edge {
+ ll dist;
+ int to;
+};
+constexpr ll INF = LL::INF;
+#include <graph/TSP.cpp>
+
+vector<int> naive() {
+ int n = sz(dist);
+ vector<int> todo(n - 1);
+ iota(all(todo), 1);
+ vector<int> res;
+ ll best = LL::INF;
+ do {
+ int last = 0;
+ ll cur = 0;
+ for (int x : todo) {
+ cur += dist[last][x];
+ last = x;
+ }
+ cur += dist[last][0];
+ if (cur < best) {
+ best = cur;
+ res = todo;
+ res.insert(res.begin(), 0);
+ res.push_back(0);
+ }
+ } while (next_permutation(all(todo)));
+ return res;
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (ll i = 0; i < 100'000; i++) {
+ int n = Random::integer<int>(1, 9);
+ dist.assign(n, {});
+ for (auto& v : dist) v = Random::integers<ll>(n, 0, 1000'000'000);
+
+ auto expected = naive();
+ auto got = TSP();
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 19;
+void performance_test() {
+ timer t;
+ dist.assign(N, {});
+ for (auto& v : dist) v = Random::integers<ll>(N, 0, 1000'000'000);
+ t.start();
+ auto got = TSP();
+ t.stop();
+
+ hash_t hash = 0;
+ for (int x : got) hash += x;
+ if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/articulationPoints.bcc.cpp b/test/graph/articulationPoints.bcc.cpp
new file mode 100644
index 0000000..15f5cf2
--- /dev/null
+++ b/test/graph/articulationPoints.bcc.cpp
@@ -0,0 +1,78 @@
+#include "../util.h"
+struct edge {
+ ll from, to, id;
+};
+#define Edge edge
+#include <graph/articulationPoints.cpp>
+#undef Edge
+#include <datastructures/unionFind.cpp>
+
+vector<vector<int>> naiveBCC(int m) {
+ init(m);
+
+ vector<int> seen(sz(adj), -1);
+ int run = 0;
+ for (int i = 0; i < sz(adj); i++) {
+ for (auto e : adj[i]) {
+ run++;
+ seen[i] = run;
+ vector<ll> todo = {e.to};
+ seen[e.to] = run;
+ while (!todo.empty()) {
+ int c = todo.back();
+ todo.pop_back();
+ for (auto ee : adj[c]) {
+ if (seen[ee.to] == run) continue;
+ seen[ee.to] = run;
+ todo.push_back(ee.to);
+ }
+ }
+ for (auto ee : adj[i]) {
+ if (seen[ee.to] == run) unionSets(ee.id, e.id);
+ }
+ }
+ }
+ vector<vector<int>> res(m);
+ for (int i = 0; i < m; i++) {
+ res[findSet(i)].push_back(i);
+ }
+ for (auto& v : res) sort(all(v));
+ res.erase(remove_if(all(res), [](const vector<int>& v){return sz(v) <= 1;}), res.end());
+ sort(all(res));
+ return res;
+}
+
+void stress_test_bcc() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, max<int>(1, min<int>(300, n*(n-1) / 2 + 1)));
+ Graph<NoData, 0, 1> g(n);
+ g.erdosRenyi(m);
+
+ adj.assign(n, {});
+ int nextId = 0;
+ g.forEdges([&](int a, int b){
+ adj[a].push_back({a, b, nextId});
+ adj[b].push_back({b, a, nextId});
+ nextId++;
+ });
+
+ auto expected = naiveBCC(nextId);
+ find();
+ vector<vector<int>> got(sz(bcc));
+ for (int i = 0; i < sz(bcc); i++) {
+ for (auto e : bcc[i]) got[i].push_back(e.id);
+ sort(all(got[i]));
+ }
+ sort(all(got));
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+int main() {
+ stress_test_bcc();
+}
diff --git a/test/graph/articulationPoints.bridges.cpp b/test/graph/articulationPoints.bridges.cpp
new file mode 100644
index 0000000..a1b89d2
--- /dev/null
+++ b/test/graph/articulationPoints.bridges.cpp
@@ -0,0 +1,64 @@
+#include "../util.h"
+struct edge {
+ ll from, to, id;
+};
+#define Edge edge
+#include <graph/articulationPoints.cpp>
+#undef Edge
+
+vector<bool> naiveBridges(const vector<pair<int, int>>& edges) {
+ vector<bool> res(sz(edges));
+
+ vector<int> seen(sz(adj), -1);
+ for (int i = 0; i < sz(edges); i++) {
+ auto [a, b] = edges[i];
+ vector<int> todo = {a};
+ seen[a] = i;
+ while (!todo.empty() && seen[b] != i) {
+ int c = todo.back();
+ todo.pop_back();
+ for (auto e : adj[c]) {
+ if (e.id == i) continue;
+ if (seen[e.to] == i) continue;
+ seen[e.to] = i;
+ todo.push_back(e.to);
+ }
+ }
+ res[i] = seen[b] != i;
+ }
+ return res;
+}
+
+void stress_test_bridges() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, max<int>(1, min<int>(300, n*(n-1) / 2 + 1)));
+ Graph<NoData, 0, 1> g(n);
+ g.erdosRenyi(m);
+
+ adj.assign(n, {});
+ vector<pair<int, int>> edges;
+ g.forEdges([&](int a, int b){
+ adj[a].push_back({a, b, sz(edges)});
+ adj[b].push_back({b, a, sz(edges)});
+ edges.emplace_back(a, b);
+ });
+
+ auto expected = naiveBridges(edges);
+ find();
+ vector<bool> got(sz(edges));
+ for (auto e : bridges) {
+ if (got[e.id]) cerr << "error: duclicate" << FAIL;
+ got[e.id] = true;
+ }
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+int main() {
+ stress_test_bridges();
+}
diff --git a/test/graph/articulationPoints.cpp b/test/graph/articulationPoints.cpp
new file mode 100644
index 0000000..2567a09
--- /dev/null
+++ b/test/graph/articulationPoints.cpp
@@ -0,0 +1,85 @@
+#include "../util.h"
+struct edge {
+ ll from, to, id;
+};
+#define Edge edge
+#include <graph/articulationPoints.cpp>
+#undef Edge
+
+vector<bool> naiveArt() {
+ vector<bool> res(sz(adj));
+
+ vector<int> seen(sz(adj), -1);
+ for (int i = 0; i < sz(adj); i++) {
+ if (adj[i].empty()) continue;
+ seen[i] = i;
+ vector<ll> todo = {adj[i][0].to};
+ seen[todo[0]] = i;
+ while (!todo.empty()) {
+ int c = todo.back();
+ todo.pop_back();
+ for (auto e : adj[c]) {
+ if (seen[e.to] == i) continue;
+ seen[e.to] = i;
+ todo.push_back(e.to);
+ }
+ }
+ for (auto e : adj[i]) {
+ if (seen[e.to] != i) res[i] = true;
+ }
+ }
+ return res;
+}
+
+void stress_test_art() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, max<int>(1, min<int>(300, n*(n-1) / 2 + 1)));
+ Graph<NoData, 0, 1> g(n);
+ g.erdosRenyi(m);
+
+ adj.assign(n, {});
+ int nextId = 0;
+ g.forEdges([&](int a, int b){
+ adj[a].push_back({a, b, nextId});
+ adj[b].push_back({b, a, nextId});
+ nextId++;
+ });
+
+ auto expected = naiveArt();
+ find();
+ vector<bool> got = isArt;
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 500'000;
+constexpr int M = 2'000'000;
+void performance_test() {
+ timer t;
+ Graph<NoData, 0, 1> g(N);
+ g.erdosRenyi(M);
+ adj.assign(N, {});
+ int nextId = 0;
+ g.forEdges([&](int a, int b){
+ adj[a].push_back({a, b, nextId});
+ adj[b].push_back({b, a, nextId});
+ nextId++;
+ });
+
+ t.start();
+ find();
+ t.stop();
+ hash_t hash = sz(bridges) + sz(bcc);
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test_art();
+ performance_test();
+}
diff --git a/test/graph/bellmannFord.cpp b/test/graph/bellmannFord.cpp
new file mode 100644
index 0000000..92f1fef
--- /dev/null
+++ b/test/graph/bellmannFord.cpp
@@ -0,0 +1,70 @@
+#include "../util.h"
+constexpr ll INF = LL::INF;
+struct edge {
+ int from, to;
+ ll cost;
+};
+#include <graph/bellmannFord.cpp>
+namespace floydWarshall {
+#include <graph/floydWarshall.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
+ vector<ll> potential = Random::integers<ll>(n, 0, 1'000'000'000'000ll);
+
+ vector<edge> edges;
+ floydWarshall::dist.assign(n, vector<ll>(n, INF));
+ for (int i = 0; i < n; i++) floydWarshall::dist[i][i] = 0;
+
+ Graph<NoData, true> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([&](int a, int b){
+ ll w = Random::integer<ll>(1, 100'000'000'000ll);
+ w = potential[b] + w - potential[a];
+ edges.push_back({a, b, w});
+ floydWarshall::dist[a][b] = min(floydWarshall::dist[a][b], w);
+ });
+
+ floydWarshall::floydWarshall();
+ for (int i = 0; i < n; i++) {
+ auto got = bellmannFord(n, edges, i);
+ auto expected = floydWarshall::dist[i];
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 5'000;
+constexpr int M = 20'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ vector<edge> edges;
+ g.forEdges([&](int a, int b){
+ ll w1 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ ll w2 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ edges.push_back({a, b, w1});
+ edges.push_back({b, a, w2});
+ });
+
+ t.start();
+ auto got = bellmannFord(N, edges, 0);
+ t.stop();
+ hash_t hash = 0;
+ for (auto x : got) hash += x;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/bitonicTSP.cpp b/test/graph/bitonicTSP.cpp
new file mode 100644
index 0000000..7c448a2
--- /dev/null
+++ b/test/graph/bitonicTSP.cpp
@@ -0,0 +1,49 @@
+#include "../util.h"
+namespace got {
+#include <graph/bitonicTSP.cpp>
+}
+namespace expected {
+#include <graph/bitonicTSPsimple.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+
+ vector<vector<double>> dist(n);
+ for (auto& v : dist) v = Random::reals<double>(n, 0, 1e18);
+
+ got::dist = dist;
+ expected::dist = dist;
+
+ auto got = got::bitonicTSP();
+ auto expected = got::bitonicTSP();
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+//this is an easy graph...
+constexpr int N = 5'000;
+void performance_test() {
+ timer t;
+ got::dist = vector<vector<double>>(N);
+ for (auto& v : got::dist) v = Random::reals<double>(N, 0, 1e18);
+
+
+ t.start();
+ auto got = got::bitonicTSP();
+ t.stop();
+ hash_t hash = 0;
+ for (auto x : got) hash += x;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/bitonicTSPsimple.cpp b/test/graph/bitonicTSPsimple.cpp
new file mode 100644
index 0000000..c79a0ef
--- /dev/null
+++ b/test/graph/bitonicTSPsimple.cpp
@@ -0,0 +1,49 @@
+#include "../util.h"
+namespace got {
+#include <graph/bitonicTSPsimple.cpp>
+}
+namespace expected {
+#include <graph/bitonicTSP.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+
+ vector<vector<double>> dist(n);
+ for (auto& v : dist) v = Random::reals<double>(n, 0, 1e18);
+
+ got::dist = dist;
+ expected::dist = dist;
+
+ auto got = got::bitonicTSP();
+ auto expected = got::bitonicTSP();
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+//this is an easy graph...
+constexpr int N = 2'000;
+void performance_test() {
+ timer t;
+ got::dist = vector<vector<double>>(N);
+ for (auto& v : got::dist) v = Random::reals<double>(N, 0, 1e18);
+
+
+ t.start();
+ auto got = got::bitonicTSP();
+ t.stop();
+ hash_t hash = 0;
+ for (auto x : got) hash += x;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/blossom.cpp b/test/graph/blossom.cpp
new file mode 100644
index 0000000..714b029
--- /dev/null
+++ b/test/graph/blossom.cpp
@@ -0,0 +1,76 @@
+#include "../util.h"
+namespace tutte {
+void gauss(int n, ll mod);
+#include <graph/matching.cpp>
+#include <math/shortModInv.cpp>
+#include <math/lgsFp.cpp>
+}
+#include <graph/blossom.cpp>
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 5'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, max<int>(1, n*(n-1) / 2 + 1));
+
+ GM blossom(n);
+ srand(Random::rng());
+ tutte::adj.assign(n, {});
+
+ Graph<NoData> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([&](int a, int b){
+ tutte::adj[a].push_back(b);
+ tutte::adj[b].push_back(a);
+
+ blossom.adj[a].push_back(b);
+ blossom.adj[b].push_back(a);
+ });
+
+ ll got = blossom.match();
+ ll expected = tutte::max_matching();
+
+ vector<bool> seen(n);
+ ll got2 = 0;
+ for (int i = 0; i < n; i++) {
+ int j = blossom.pairs[i];
+ if (j >= n) continue;
+ if (blossom.pairs[j] != i) cerr << "error: inconsitent" << FAIL;
+ if (j == i) cerr << "error: invalid" << FAIL;
+ if (j < i) continue;
+ if (seen[i] || seen[j]) cerr << "error: invalid" << FAIL;
+ seen[i] = seen[j] = true;
+ got2++;
+ }
+
+ if (got != got2) cerr << "got: " << got << ", got2: " << got2 << FAIL;
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+//this is an easy graph...
+constexpr int N = 100'000;
+constexpr int M = 500'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ GM blossom(N);
+ g.forEdges([&](int a, int b){
+ blossom.adj[a].push_back(b);
+ blossom.adj[b].push_back(a);
+ });
+
+ t.start();
+ hash_t hash = blossom.match();
+ t.stop();
+ if (t.time > 200) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/bronKerbosch.cpp b/test/graph/bronKerbosch.cpp
new file mode 100644
index 0000000..1ccd493
--- /dev/null
+++ b/test/graph/bronKerbosch.cpp
@@ -0,0 +1,73 @@
+#include "../util.h"
+#include <graph/bronKerbosch.cpp>
+
+vector<bits> naiveCliques;
+
+void naive(bits mask = {}, int l = 0) {
+ bool maximal = true;
+ for (ll i = 0; i < l; i++) {
+ if (mask[i]) continue;
+ if ((adj[i] & mask) == mask) maximal = false;
+ }
+ for (; l < sz(adj); l++) {
+ if ((adj[l] & mask) == mask) {
+ maximal = false;
+ mask[l] = 1;
+ naive(mask, l + 1);
+ mask[l] = 0;
+ }
+ }
+ if (maximal and mask.any()) naiveCliques.push_back(mask);
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(2, 15);
+ int m = Random::integer<int>(0, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
+
+ Graph<NoData> g(n);
+ g.erdosRenyi(m);
+ adj.assign(n, {});
+ g.forEdges([&](int a, int b){
+ addEdge(a, b);
+ });
+
+ bronKerbosch();
+ naiveCliques.clear();
+ naive();
+
+ sort(all(cliques), [](bits a, bits b){return a.to_ullong() < b.to_ullong();});
+ sort(all(naiveCliques), [](bits a, bits b){return a.to_ullong() < b.to_ullong();});
+
+ if (cliques != naiveCliques) cerr << "got: " << sz(cliques) << ", expected: " << sz(naiveCliques) << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 55;
+constexpr int M = N*(N-1) / 2 - 2*N;
+void performance_test() {
+ timer t;
+
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ adj.assign(N, {});
+ g.forEdges([&](int a, int b){
+ addEdge(a, b);
+ });
+
+ t.start();
+ bronKerbosch();
+ t.stop();
+
+ hash_t hash = sz(cliques);
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/centroid.cpp b/test/graph/centroid.cpp
new file mode 100644
index 0000000..41d9d0f
--- /dev/null
+++ b/test/graph/centroid.cpp
@@ -0,0 +1,77 @@
+#include "../util.h"
+vector<vector<int>> adj;
+#include <graph/centroid.cpp>
+
+int subtreeSize(int c, int p) {
+ int res = 1;
+ for (int x : adj[c]) {
+ if (x == p) continue;
+ res += subtreeSize(x, c);
+ }
+ return res;
+}
+
+vector<int> naive() {
+ vector<int> res;
+ for (int i = 0; i < sz(adj); i++) {
+ bool isCentroid = true;
+ for (int j : adj[i]) isCentroid &= 2*subtreeSize(j, i) <= sz(adj);
+ if (isCentroid) res.push_back(i);
+ }
+ return res;
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100'000; tries++) {
+ int n = Random::integer<int>(1, 50);
+ Graph<NoData> g(n);
+ g.tree();
+
+ adj.assign(n, {});
+ g.forEdges([&](int a, int b){
+ adj[a].push_back(b);
+ adj[b].push_back(a);
+ });
+
+ auto expected = naive();
+ sort(all(expected));
+
+ for (int i = 0; i < n; i++) {
+ auto [a, b] = find_centroid(i);
+ vector<int> got;
+ if (a >= 0) got.push_back(a);
+ if (b >= 0) got.push_back(b);
+ sort(all(got));
+
+ if (got != expected) cerr << "error" << FAIL;
+ }
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 2'000'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.tree();
+
+ adj.assign(N, {});
+ g.forEdges([&](int a, int b){
+ adj[a].push_back(b);
+ adj[b].push_back(a);
+ });
+
+ t.start();
+ auto [gotA, gotB] = find_centroid();
+ t.stop();
+ hash_t hash = gotA + gotB;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/cycleCounting.cpp b/test/graph/cycleCounting.cpp
new file mode 100644
index 0000000..8e53aec
--- /dev/null
+++ b/test/graph/cycleCounting.cpp
@@ -0,0 +1,79 @@
+#include "../util.h"
+#include <datastructures/unionFind.cpp>
+#include <graph/cycleCounting.cpp>
+
+int naive(const vector<pair<int, int>>& edges, int n) {
+ int res = 0;
+ for (int i = 1; i < (1ll << sz(edges)); i++) {
+ vector<int> deg(n);
+ init(n);
+ int cycles = 0;
+ for (int j = 0; j < sz(edges); j++) {
+ if (((i >> j) & 1) != 0) {
+ auto [a, b] = edges[j];
+ deg[a]++;
+ deg[b]++;
+ if (findSet(a) != findSet(b)) {
+ unionSets(a, b);
+ } else {
+ cycles++;
+ }
+ }
+ }
+ bool ok = cycles == 1;
+ for (auto d : deg) ok &= d == 0 || d == 2;
+ if (ok) res++;
+ }
+ return res;
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 50'000; tries++) {
+ int n = Random::integer<int>(1, 8);
+ int m = Random::integer<int>(0, min<int>(15, n*(n-1) / 2 + 1));
+
+ Graph<NoData, 0, 1, 1> g(n);
+ g.erdosRenyi(m);
+ vector<pair<int, int>> edges;
+ cycles cyc(n);
+ g.forEdges([&](int a, int b){
+ edges.emplace_back(a, b);
+ cyc.addEdge(a, b);
+ });
+
+ int expected = naive(edges, n);
+ int got = cyc.count();
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 100;
+constexpr int M = 20;
+void performance_test() {
+ timer t;
+
+ Graph<NoData> g(N);
+ g.tree();
+ g.erdosRenyi(M);
+ cycles cyc(N);
+ g.forEdges([&](int a, int b){
+ cyc.addEdge(a, b);
+ });
+
+ t.start();
+ hash_t hash = cyc.count();
+ cerr << sz(cyc.base) << endl;
+ t.stop();
+
+ if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/dijkstra.cpp b/test/graph/dijkstra.cpp
new file mode 100644
index 0000000..c0cfb7e
--- /dev/null
+++ b/test/graph/dijkstra.cpp
@@ -0,0 +1,64 @@
+#include "../util.h"
+constexpr ll INF = LL::INF;
+#include <graph/dijkstra.cpp>
+struct edge {
+ int from, to;
+ ll cost;
+};
+#include <graph/bellmannFord.cpp>
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
+
+ vector<vector<path>> adj(n);
+ vector<edge> edges;
+
+ Graph<NoData, true> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([&](int a, int b){
+ ll w = Random::integer<ll>(1, 1'000'000'000'000ll);
+ adj[a].push_back({w, b});
+ edges.push_back({a, b, w});
+ });
+
+ for (int i = 0; i < n; i++) {
+ auto got = dijkstra(adj, i);
+ auto expected = bellmannFord(n, edges, i);
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 500'000;
+constexpr int M = 3'000'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ vector<vector<path>> adj(N);
+ g.forEdges([&](int a, int b){
+ ll w1 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ ll w2 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ adj[a].push_back({w1, b});
+ adj[b].push_back({w2, a});
+ });
+
+ t.start();
+ auto got = dijkstra(adj, 0);
+ t.stop();
+ hash_t hash = 0;
+ for (auto x : got) hash += x;
+ if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/dinicScaling.cpp b/test/graph/dinicScaling.cpp
new file mode 100644
index 0000000..967d6b1
--- /dev/null
+++ b/test/graph/dinicScaling.cpp
@@ -0,0 +1,61 @@
+#include "../util.h"
+namespace dinic {
+#include <graph/dinicScaling.cpp>
+}
+
+namespace pushRelabel {
+#include <graph/pushRelabel.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 20'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
+
+ dinic::adj.assign(n, {});
+ pushRelabel::adj.assign(n, {});
+
+ Graph<NoData, true> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([](int a, int b){
+ ll w = Random::integer<ll>(1, 1'000'000'000'000ll);
+ dinic::addEdge(a, b, w);
+ pushRelabel::addEdge(a, b, w);
+ });
+
+ ll got = dinic::maxFlow(0, n - 1);
+ ll expected = pushRelabel::maxFlow(0, n - 1);
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 50000;
+constexpr int M = 200000;
+void performance_test() {
+ using namespace dinic;
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ adj.assign(N, {});
+ g.forEdges([](int a, int b){
+ ll w1 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ ll w2 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ addEdge(a, b, w1);
+ addEdge(b, a, w2);
+ });
+
+ t.start();
+ hash_t hash = maxFlow(0, N - 1);
+ t.stop();
+ if (t.time > 2000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/euler.cpp b/test/graph/euler.cpp
new file mode 100644
index 0000000..6666040
--- /dev/null
+++ b/test/graph/euler.cpp
@@ -0,0 +1,87 @@
+#include "../util.h"
+struct Euler {
+ Euler(int n) : idx(n), validIdx(n) {}
+#include <graph/euler.cpp>
+};
+
+Euler eulerGraph(int n, int m) {
+ Euler res(n);
+
+ Graph<NoData> g(n);
+ g.tree();
+ g.forEdges([&](int a, int b) {
+ res.addEdge(a, b);
+ });
+
+ for (int i = n-1; i < m; i++) {
+ int a = Random::integer<int>(0, n);
+ int b = Random::integer<int>(0, n);
+ res.addEdge(a, b);
+ }
+ int last = -1;
+ for (int i = 0; i < n; i++) {
+ if (sz(res.idx[i]) % 2 != 0) {
+ if (last >= 0) {
+ res.addEdge(last, i);
+ last = -1;
+ } else {
+ last = i;
+ }
+ }
+ }
+ if (last >= 0) cerr << "FAIL" << FAIL;
+
+ return res;
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(n-1, 200);
+
+ auto g = eulerGraph(n, m);
+
+ vector<vector<int>> expected(n);
+ for (int i = 0; i < n; i++) {
+ for (int j : g.idx[i]) {
+ expected[i].push_back(g.to[j]);
+ }
+ sort(all(expected[i]));
+ }
+
+ g.euler(0);
+ vector<vector<int>> got(n);
+ if (g.cycle.front() != g.cycle.back()) cerr << "error: not cyclic" << FAIL;
+ for (int i = 1; i < sz(g.cycle); i++) {
+ int a = g.cycle[i-1];
+ int b = g.cycle[i];
+ got[a].push_back(b);
+ got[b].push_back(a);
+ }
+ for (auto& v : got) sort(all(v));
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 100'000;
+constexpr int M = 1'000'000;
+void performance_test() {
+ timer t;
+ auto g = eulerGraph(N, M);
+ t.start();
+ g.euler(0);
+ t.stop();
+ hash_t hash = 0;
+ for (int x : g.cycle) hash += x;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/floydWarshall.cpp b/test/graph/floydWarshall.cpp
new file mode 100644
index 0000000..a93a9ea
--- /dev/null
+++ b/test/graph/floydWarshall.cpp
@@ -0,0 +1,90 @@
+#include "../util.h"
+constexpr ll INF = LL::INF;
+struct edge {
+ int from, to;
+ ll cost;
+};
+#include <graph/bellmannFord.cpp>
+namespace floydWarshall {
+#include <graph/floydWarshall.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
+ vector<ll> potential = Random::integers<ll>(n, 0, 1'000'000'000'000ll);
+
+ vector<edge> edges;
+ floydWarshall::dist.assign(n, vector<ll>(n, INF));
+ for (int i = 0; i < n; i++) floydWarshall::dist[i][i] = 0;
+
+ Graph<NoData, true> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([&](int a, int b){
+ ll w = Random::integer<ll>(1, 100'000'000'000ll);
+ w = potential[b] + w - potential[a];
+ edges.push_back({a, b, w});
+ floydWarshall::dist[a][b] = min(floydWarshall::dist[a][b], w);
+ });
+
+ vector<vector<ll>> orig = floydWarshall::dist;
+
+ floydWarshall::floydWarshall();
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < 10; j++) {
+ int k = Random::integer<int>(0, n);
+ auto path = floydWarshall::getPath(i, k);
+ if (path.empty() != (floydWarshall::dist[i][k] == INF)) cerr << "error: reconstruction" << FAIL;
+ if (path.empty()) continue;
+ if (path.front() != i) cerr << "error: start" << FAIL;
+ if (path.back() != k) cerr << "error: end" << FAIL;
+ for (int l = 1; l < sz(path); l++) {
+ if (floydWarshall::dist[i][path[l-1]] +
+ orig[path[l-1]][path[l]] +
+ floydWarshall::dist[path[l]][k] !=
+ floydWarshall::dist[i][k]) cerr << "error: edge" << FAIL;
+ }
+ }
+ }
+
+ for (int i = 0; i < n; i++) {
+ auto got = floydWarshall::dist[i];
+ auto expected = bellmannFord(n, edges, i);
+
+ if (got != expected) cerr << "error" << FAIL;
+ queries += n;
+ }
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 500;
+constexpr int M = 20'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ floydWarshall::dist.assign(N, vector<ll>(N, INF));
+ for (int i = 0; i < N; i++) floydWarshall::dist[i][i] = 0;
+ g.forEdges([&](int a, int b){
+ ll w1 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ ll w2 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ floydWarshall::dist[a][b] = w1;
+ floydWarshall::dist[b][a] = w2;
+ });
+
+ t.start();
+ floydWarshall::floydWarshall();
+ t.stop();
+ hash_t hash = 0;
+ for (auto x : floydWarshall::dist[42]) hash += x;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/havelHakimi.cpp b/test/graph/havelHakimi.cpp
new file mode 100644
index 0000000..71476ec
--- /dev/null
+++ b/test/graph/havelHakimi.cpp
@@ -0,0 +1,65 @@
+#include "../util.h"
+#include <graph/havelHakimi.cpp>
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, n*(n-1) / 2 + 1);
+ Graph g(n);
+ g.erdosRenyi(m);
+
+ vector<int> expected(n);
+ for (int i = 0; i < n; i++) expected[i] = g.deg(i);
+
+ auto res = havelHakimi(expected);
+ if (sz(res) != n) cerr << "error: wrong number of nodes" << FAIL;
+ vector<vector<int>> rev(n);
+ vector<int> got(n);
+ for (int i = 0; i < n; i++) {
+ got[i] = sz(res[i]);
+ for (int j : res[i]) {
+ if (j < 0 || j >= n) cerr << "error: invalid edge" << FAIL;
+ rev[j].push_back(i);
+ }
+ }
+
+ for (int i = 0; i < n; i++) {
+ sort(all(res[i]));
+ sort(all(rev[i]));
+ if (res[i] != rev[i]) cerr << "error: graph is directed" << FAIL;
+ for (int j : res[i]) if (j == i) cerr << "error: graph has loop" << FAIL;
+ for (int j = 1; j < sz(res[i]); j++) {
+ if (res[i][j] == res[i][j-1]) cerr << "error: multiedge" << FAIL;
+ }
+ }
+
+ if (expected != got) cerr << "error" << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 200'000;
+constexpr int M = 1'000'000;
+void performance_test() {
+ timer t;
+ Graph g(N);
+ g.erdosRenyi(M);
+
+ vector<int> expected(N);
+ for (int i = 0; i < N; i++) expected[i] = g.deg(i);
+
+ t.start();
+ auto res = havelHakimi(expected);
+ t.stop();
+ hash_t hash = 0;
+ for (auto& v : res) hash += sz(v);
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/hopcroftKarp.cpp b/test/graph/hopcroftKarp.cpp
new file mode 100644
index 0000000..05599dd
--- /dev/null
+++ b/test/graph/hopcroftKarp.cpp
@@ -0,0 +1,74 @@
+#include "../util.h"
+namespace kuhn {
+#include <graph/maxCarBiMatch.cpp>
+}
+namespace hk {
+#include <graph/hopcroftKarp.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 50'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, max<int>(1, n*(n-1) / 2 + 1));
+
+ kuhn::adj.assign(2*n, {});
+ hk::adj.assign(2*n, {});
+
+ Graph<NoData> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([&](int a, int b){
+ kuhn::adj[a].push_back(n+b);
+ kuhn::adj[b+n].push_back(a);
+
+ hk::adj[a].push_back(n+b);
+ hk::adj[b+n].push_back(a);
+ });
+
+ ll got = hk::hopcroft_karp(n);
+ ll expected = kuhn::kuhn(n);
+
+ vector<bool> seen(2*n);
+ ll got2 = 0;
+ for (int i = 0; i < n; i++) {
+ int j = hk::pairs[i];
+ if (j < 0) continue;
+ if (hk::pairs[j] != i) cerr << "error: inconsitent" << FAIL;
+ if (j == i) cerr << "error: invalid" << FAIL;
+ if (j < i) continue;
+ if (seen[i] || seen[j]) cerr << "error: invalid" << FAIL;
+ seen[i] = seen[j] = true;
+ got2++;
+ }
+
+ if (got != got2) cerr << "got: " << got << ", got2: " << got2 << FAIL;
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+//this is an easy graph...
+constexpr int N = 100'000;
+constexpr int M = 500'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ hk::adj.assign(2*N, {});
+ g.forEdges([&](int a, int b){
+ hk::adj[a].push_back(N+b);
+ hk::adj[b+N].push_back(a);
+ });
+
+ t.start();
+ hash_t hash = hk::hopcroft_karp(N);
+ t.stop();
+ if (t.time > 300) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/kruskal.cpp b/test/graph/kruskal.cpp
new file mode 100644
index 0000000..f6245b9
--- /dev/null
+++ b/test/graph/kruskal.cpp
@@ -0,0 +1,91 @@
+#include "../util.h"
+#include <datastructures/unionFind.cpp>
+
+struct edge {
+ int from, to;
+ ll cost;
+ bool operator<(const edge& o) const {
+ return cost > o.cost;
+ }
+};
+ll kruskal(vector<edge>& edges, int n) {
+ init(n);
+ #define Edge edge
+ #include <graph/kruskal.cpp>
+ #undef Edge
+ return cost;
+}
+
+ll prim(vector<edge>& edges, int n) {
+ vector<vector<pair<ll, int>>> adj(n);
+ for (auto [a, b, d] : edges) {
+ adj[a].emplace_back(d, b);
+ adj[b].emplace_back(d, a);
+ }
+ priority_queue<pair<ll, int>> todo;
+ vector<bool> seen(n);
+ ll res = 0;
+ for (ll i = 0; i < n; i++) {
+ if (seen[i]) continue;
+ todo.push({0, i});
+ while (!todo.empty()) {
+ auto [d, c] = todo.top();
+ todo.pop();
+ if (seen[c]) continue;
+ seen[c] = true;
+ res += d;
+ for (auto e : adj[c]) {
+ todo.push(e);
+ }
+ }
+ }
+ return res;
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ int m = Random::integer<int>(0, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
+
+
+ Graph<NoData> g(n);
+ g.erdosRenyi(m);
+ vector<edge> edges;
+ g.forEdges([&](int a, int b){
+ ll w = Random::integer<ll>(-1'000'000'000ll, 1'000'000'000ll);
+ edges.push_back({a, b, w});
+ });
+
+ ll got = kruskal(edges, n);
+ ll expected = prim(edges, n);
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 500'000;
+constexpr int M = 3'000'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ vector<edge> edges;
+ g.forEdges([&](int a, int b){
+ ll w = Random::integer<ll>(-1'000'000'000ll, 1'000'000'000ll);
+ edges.push_back({a, b, w});
+ });
+
+ t.start();
+ hash_t hash = kruskal(edges, N);
+ t.stop();
+ if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/matching.cpp b/test/graph/matching.cpp
new file mode 100644
index 0000000..b8fbc6c
--- /dev/null
+++ b/test/graph/matching.cpp
@@ -0,0 +1,62 @@
+#include "../util.h"
+namespace tutte {
+void gauss(int n, ll mod);
+#include <graph/matching.cpp>
+#include <math/shortModInv.cpp>
+#include <math/lgsFp.cpp>
+}
+#include <graph/blossom.cpp>
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 5'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, max<int>(1, n*(n-1) / 2 + 1));
+
+ GM blossom(n);
+ srand(Random::rng());
+ tutte::adj.assign(n, {});
+
+ Graph<NoData> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([&](int a, int b){
+ tutte::adj[a].push_back(b);
+ tutte::adj[b].push_back(a);
+
+ blossom.adj[a].push_back(b);
+ blossom.adj[b].push_back(a);
+ });
+
+ ll got = tutte::max_matching();
+ ll expected = blossom.match();
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 125;
+constexpr int M = 5'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ srand(Random::rng());
+ tutte::adj.assign(N, {});
+ g.forEdges([&](int a, int b){
+ tutte::adj[a].push_back(b);
+ tutte::adj[b].push_back(a);
+ });
+
+ t.start();
+ hash_t hash = tutte::max_matching();
+ t.stop();
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/maxCarBiMatch.cpp b/test/graph/maxCarBiMatch.cpp
new file mode 100644
index 0000000..6d7fad0
--- /dev/null
+++ b/test/graph/maxCarBiMatch.cpp
@@ -0,0 +1,74 @@
+#include "../util.h"
+namespace kuhn {
+#include <graph/maxCarBiMatch.cpp>
+}
+namespace hk {
+#include <graph/hopcroftKarp.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 50'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, max<int>(1, n*(n-1) / 2 + 1));
+
+ kuhn::adj.assign(2*n, {});
+ hk::adj.assign(2*n, {});
+
+ Graph<NoData> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([&](int a, int b){
+ kuhn::adj[a].push_back(n+b);
+ kuhn::adj[b+n].push_back(a);
+
+ hk::adj[a].push_back(n+b);
+ hk::adj[b+n].push_back(a);
+ });
+
+ ll got = kuhn::kuhn(n);
+ ll expected = hk::hopcroft_karp(n);
+
+ vector<bool> seen(2*n);
+ ll got2 = 0;
+ for (int i = 0; i < n; i++) {
+ int j = kuhn::pairs[i];
+ if (j < 0) continue;
+ if (kuhn::pairs[j] != i) cerr << "error: inconsitent" << FAIL;
+ if (j == i) cerr << "error: invalid" << FAIL;
+ if (j < i) continue;
+ if (seen[i] || seen[j]) cerr << "error: invalid" << FAIL;
+ seen[i] = seen[j] = true;
+ got2++;
+ }
+
+ if (got != got2) cerr << "got: " << got << ", got2: " << got2 << FAIL;
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+//this is an easy graph...
+constexpr int N = 10'000;
+constexpr int M = 100'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ kuhn::adj.assign(2*N, {});
+ g.forEdges([&](int a, int b){
+ kuhn::adj[a].push_back(N+b);
+ kuhn::adj[b+N].push_back(a);
+ });
+
+ t.start();
+ hash_t hash = kuhn::kuhn(N);
+ t.stop();
+ if (t.time > 200) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/maxWeightBipartiteMatching.cpp b/test/graph/maxWeightBipartiteMatching.cpp
new file mode 100644
index 0000000..d245405
--- /dev/null
+++ b/test/graph/maxWeightBipartiteMatching.cpp
@@ -0,0 +1,59 @@
+#include "../util.h"
+#pragma GCC diagnostic ignored "-Wshadow"
+namespace matching {
+ constexpr int N_LEFT = 1000;
+ constexpr int N_RIGHT = 1000;
+ constexpr double INF = LD::INF;
+ #include <graph/maxWeightBipartiteMatching.cpp>
+}
+namespace mcmf {
+ #include <graph/minCostMaxFlow.cpp>
+}
+
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 20'000; tries++) {
+ auto [l, r] = Random::pair<int>(1, 30);
+ mcmf::MinCostFlow mcmf(l+r+2, 0, 1);
+
+ for (int i = 0; i < l; i++) mcmf.addEdge(0, 2 + i, 1, 0);
+ for (int i = 0; i < r; i++) mcmf.addEdge(2 + l + i, 1, 1, 0);
+ for (int i = 0; i < l; i++) {
+ for (int j = 0; j < r; j++) {
+ matching::costs[i][j] = Random::integer<int>(-100, 100);
+ mcmf.addEdge(2 + i, 2 + l + j, 1, -matching::costs[i][j]);
+ }
+ }
+
+ double got = matching::match(l, r);
+ mcmf.mincostflow();
+ ll expected = -mcmf.mincost;
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += l + r;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+void performance_test() {
+ using namespace matching;
+ timer t;
+
+ for (int i = 0; i < N_LEFT; i++) {
+ for (int j = 0; j < N_RIGHT; j++) {
+ costs[i][j] = Random::integer<int>(-100, 100);
+ }
+ }
+
+ t.start();
+ hash_t hash = match(N_LEFT, N_RIGHT);
+ t.stop();
+ if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/minCostMaxFlow.cpp b/test/graph/minCostMaxFlow.cpp
new file mode 100644
index 0000000..8c92aa7
--- /dev/null
+++ b/test/graph/minCostMaxFlow.cpp
@@ -0,0 +1,68 @@
+#include "../util.h"
+#pragma GCC diagnostic ignored "-Wshadow"
+namespace matching {
+ constexpr int N_LEFT = 1000;
+ constexpr int N_RIGHT = 1000;
+ constexpr double INF = LD::INF;
+ #include <graph/maxWeightBipartiteMatching.cpp>
+}
+namespace mcmf {
+ #include <graph/minCostMaxFlow.cpp>
+}
+
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 20'000; tries++) {
+ auto [l, r] = Random::pair<int>(1, 30);
+ mcmf::MinCostFlow mcmf(l+r+2, 0, 1);
+
+ for (int i = 0; i < l; i++) mcmf.addEdge(0, 2 + i, 1, 0);
+ for (int i = 0; i < r; i++) mcmf.addEdge(2 + l + i, 1, 1, 0);
+ for (int i = 0; i < l; i++) {
+ for (int j = 0; j < r; j++) {
+ matching::costs[i][j] = Random::integer<int>(-100, 100);
+ mcmf.addEdge(2 + i, 2 + l + j, 1, -matching::costs[i][j]);
+ }
+ }
+
+ mcmf.mincostflow();
+ ll got = -mcmf.mincost;
+ double expected = matching::match(l, r);
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += l + r;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 1'000;
+constexpr int M = 10'000;
+void performance_test() {
+ using namespace mcmf;
+ timer t;
+
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ MinCostFlow mcmf(N, 0, 1);
+ vector<ll> potential = Random::integers<ll>(N, 0, 1'000'000ll);
+ g.forEdges([&](int a, int b){
+ ll c = Random::integer<ll>(1, 1000'000);
+ ll cost = Random::integer<ll>(0, 1000'000);
+ mcmf.addEdge(a, b, c, potential[b] + cost - potential[a]);
+ mcmf.addEdge(b, a, c, potential[a] + cost - potential[b]);
+ });
+
+ t.start();
+ mcmf.mincostflow();
+ t.stop();
+
+ hash_t hash = mcmf.mincost;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/pushRelabel.cpp b/test/graph/pushRelabel.cpp
new file mode 100644
index 0000000..ac3b079
--- /dev/null
+++ b/test/graph/pushRelabel.cpp
@@ -0,0 +1,61 @@
+#include "../util.h"
+namespace dinic {
+#include <graph/dinicScaling.cpp>
+}
+
+namespace pushRelabel {
+#include <graph/pushRelabel.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 20'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
+
+ dinic::adj.assign(n, {});
+ pushRelabel::adj.assign(n, {});
+
+ Graph<NoData, true> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([](int a, int b){
+ ll w = Random::integer<ll>(1, 1'000'000'000'000ll);
+ dinic::addEdge(a, b, w);
+ pushRelabel::addEdge(a, b, w);
+ });
+
+ ll got = pushRelabel::maxFlow(0, n - 1);
+ ll expected = dinic::maxFlow(0, n - 1);
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 50000;
+constexpr int M = 200000;
+void performance_test() {
+ using namespace pushRelabel;
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ adj.assign(N, {});
+ g.forEdges([](int a, int b){
+ ll w1 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ ll w2 = Random::integer<ll>(1, 1'000'000'000'000ll);
+ addEdge(a, b, w1);
+ addEdge(b, a, w2);
+ });
+
+ t.start();
+ hash_t hash = maxFlow(0, N - 1);
+ t.stop();
+ if (t.time > 300) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/scc.cpp b/test/graph/scc.cpp
new file mode 100644
index 0000000..123050f
--- /dev/null
+++ b/test/graph/scc.cpp
@@ -0,0 +1,92 @@
+#include "../util.h"
+#include <graph/scc.cpp>
+#include <datastructures/unionFind.cpp>
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100'000; tries++) {
+ int n = Random::integer<int>(1, 30);
+ int m = Random::integer<int>(0, max<int>(1, min<int>(100, n*(n-1) / 2 + 1)));
+ Graph<NoData, true> g(n);
+ g.erdosRenyi(m);
+
+ adj.assign(n, {});
+ g.forEdges([](int a, int b){
+ adj[a].push_back(b);
+ });
+ scc();
+
+ vector<bool> tmp(n);
+ for (int i = 0; i < sz(sccs); i++) {
+ for (int x : sccs[i]) {
+ if (tmp[x]) cerr << "error: duclicate" << FAIL;
+ if (idx[x] != i) cerr << "error: inconsistent" << FAIL;
+ tmp[x] = true;
+ }
+ }
+ for (int i = 0; i < n; i++) {
+ if (!tmp[i]) cerr << "error: missing" << FAIL;
+ }
+
+ init(n);
+ vector<ll> seen(n);
+ int tmpCounter = 0;
+ auto reach = [&](int a, int b) {
+ tmpCounter++;
+ seen[a] = tmpCounter;
+ vector<int> todo = {a};
+ while (seen[b] != tmpCounter && !todo.empty()) {
+ a = todo.back();
+ todo.pop_back();
+ g.forOut(a, [&](int /**/, int x){
+ if (seen[x] != tmpCounter) {
+ seen[x] = tmpCounter;
+ todo.push_back(x);
+ }
+ });
+ }
+ return seen[b] == tmpCounter;
+ };
+ for (int a = 0; a < n; a++) {
+ for (int b = 0; b < a; b++) {
+ if (findSet(a) == findSet(b)) continue;
+ if (reach(a, b) && reach(b, a)) unionSets(a, b);
+ }
+ }
+
+ for (int a = 0; a < n; a++) {
+ for (int b = 0; b <= a; b++) {
+ bool got = idx[a] == idx[b];
+ bool expected = findSet(a) == findSet(b);
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ }
+ }
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 500'000;
+constexpr int M = 2'000'000;
+void performance_test() {
+ timer t;
+ Graph<NoData, true> g(N);
+ g.erdosRenyi(M);
+ adj.assign(N, {});
+ g.forEdges([](int a, int b){
+ adj[a].push_back(b);
+ });
+
+ t.start();
+ scc();
+ t.stop();
+ hash_t hash = 0;
+ for (int x : idx) hash += x;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/stoerWagner.cpp b/test/graph/stoerWagner.cpp
new file mode 100644
index 0000000..2003f09
--- /dev/null
+++ b/test/graph/stoerWagner.cpp
@@ -0,0 +1,81 @@
+#include "../util.h"
+constexpr ll INF = LL::INF;
+
+namespace stoerWagner {
+#include <graph/stoerWagner.cpp>
+ void addEdge(int u, int v, ll c) {
+ adj[u].push_back({u, v, c});
+ adj[v].push_back({v, u, c});
+ }
+}
+
+namespace pushRelabel {
+#include <graph/pushRelabel.cpp>
+ ll minCut() {
+ ll res = INF;
+ for (int i = 0; i < sz(adj); i++) {
+ for (int j = 0; j < i; j++) {
+ if (i == j) continue;
+ res = min(res, maxFlow(i, j));
+ for (auto& v : adj) {
+ for (auto& e : v) {
+ e.f = 0;
+ }
+ }
+ }
+ }
+ return res;
+ }
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 5'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
+
+ stoerWagner::adj.assign(n, {});
+ pushRelabel::adj.assign(n, {});
+
+ Graph<NoData> g(n);
+ g.erdosRenyi(m);
+ g.forEdges([](int a, int b){
+ ll w = Random::integer<ll>(1, 1'000'000'000'000ll);
+ stoerWagner::addEdge(a, b, w);
+ pushRelabel::addEdge(a, b, w);
+ pushRelabel::addEdge(b, a, w);
+ });
+
+ ll got = stoerWagner::stoer_wagner();
+ ll expected = pushRelabel::minCut();
+
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 200;
+constexpr int M = 10000;
+void performance_test() {
+ using namespace stoerWagner;
+ timer t;
+ Graph<NoData> g(N);
+ g.erdosRenyi(M);
+ adj.assign(N, {});
+ g.forEdges([](int a, int b){
+ ll w = Random::integer<ll>(1, 1'000'000'000'000ll);
+ addEdge(a, b, w);
+ });
+
+ t.start();
+ hash_t hash = stoer_wagner();
+ t.stop();
+ if (t.time > 2000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/treeIsomorphism.cpp b/test/graph/treeIsomorphism.cpp
new file mode 100644
index 0000000..97f4df4
--- /dev/null
+++ b/test/graph/treeIsomorphism.cpp
@@ -0,0 +1,126 @@
+#include "../util.h"
+struct tree {
+ tree(int n) : adj(n) {}
+ #include <graph/treeIsomorphism.cpp>
+ #include <graph/centroid.cpp>
+
+ pair<int, int> treeLabel() {
+ auto [a, b] = find_centroid(0);
+ if (a >= 0) a = treeLabel(a);
+ if (b >= 0) b = treeLabel(b);
+ if (a > b) swap(a, b);
+ return {a, b};
+ }
+};
+
+void stress_test_eq() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100'000; tries++) {
+ int n = Random::integer<int>(1, 50);
+ Graph<NoData> g(n);
+ g.tree();
+
+ tree t(n);
+
+ g.forEdges([&](int a, int b){
+ t.adj[a].push_back(b);
+ t.adj[b].push_back(a);
+ });
+ auto [gotA, gotB] = t.treeLabel();
+
+ g.permutate();
+ t.adj.assign(n, {});
+ g.forEdges([&](int a, int b){
+ t.adj[a].push_back(b);
+ t.adj[b].push_back(a);
+ });
+ auto [expectedA, expectedB] = t.treeLabel();
+
+ if (gotA != expectedA) cerr << "got: " << gotA << ", expected: " << expectedA << FAIL;
+ if (gotB != expectedB) cerr << "got: " << gotB << ", expected: " << expectedB << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+void test_tiny() {
+ vector<int> expected = {1,1,1,1,2,3,6,11,23}; //#A000055
+ for (int i = 1; i < sz(expected); i++) {
+ set<pair<int, int>> got;
+ tree t(i);
+
+ int labeled = 1;
+ for (int j = 3; j < i; j++) labeled *= i;
+ for (int j = 0; j < 10 * labeled; j++) {
+ Graph<NoData> g(i);
+ g.tree();
+
+ t.adj.assign(i, {});
+ g.forEdges([&](int a, int b){
+ t.adj[a].push_back(b);
+ t.adj[b].push_back(a);
+ });
+
+ got.insert(t.treeLabel());
+ }
+ if (sz(got) != expected[i]) cerr << i << ", got: " << sz(got) << ", expected: " << expected[i] << FAIL;
+ }
+ cerr << "tested tiny: " << sz(expected) << endl;
+}
+
+void stress_test_neq() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100'000; tries++) {
+ int n = Random::integer<int>(20, 50);
+ Graph<NoData> g(n);
+ g.tree();
+
+ tree t(n);
+
+ g.forEdges([&](int a, int b){
+ t.adj[a].push_back(b);
+ t.adj[b].push_back(a);
+ });
+ auto [gotA, gotB] = t.treeLabel();
+
+ g.clear().tree();
+ t.adj.assign(n, {});
+ g.forEdges([&](int a, int b){
+ t.adj[a].push_back(b);
+ t.adj[b].push_back(a);
+ });
+ auto [expectedA, expectedB] = t.treeLabel();
+
+ if (gotA == expectedA && gotA >= 0) cerr << "error: " << n << ", " << tries << FAIL;
+ if (gotB == expectedB) cerr << "error: " << n << ", " << tries << FAIL;
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 500'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.tree();
+
+ tree tt(N);
+ g.forEdges([&](int a, int b){
+ tt.adj[a].push_back(b);
+ tt.adj[b].push_back(a);
+ });
+
+ t.start();
+ auto [gotA, gotB] = tt.treeLabel();
+ t.stop();
+ hash_t hash = gotA + gotB;
+ if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test_eq();
+ test_tiny();
+ stress_test_neq();
+ performance_test();
+}