From 8d11c6c8213f46f0fa19826917c255edd5d43cb1 Mon Sep 17 00:00:00 2001
From: mzuenni
Date: Sun, 28 Jul 2024 22:54:40 +0200
Subject: Test (#4)

* update
* moved content in subdir
* rename file
* add test setup
* add test setup
* add github action
* automaticly test all cpp files
* timeout after 10s
* setulimit and dont zero memory
* test build pdf
* install latexmk
* update
* update
* ngerman
* fonts
* removed old code
* add first test
* added tests
* test in sorted order
* more tests
* simplified test
* more tests
* fix suffix tree
* fixes and improvements
* done ust lst directly
* fix swap
* add links to pdf
* fix constants
* add primorial
* add comment
* various improvements
* more tests
* added missing stuf
* more tests
* fix tests
* more tests
* more tests
* more tests
* fix recursion?
* test trie
* more tests
* only use python temporarily for listings
* only use python temporarily for listings
* more tests
* fix longestCommonSubstring
* more tests
* more tests
* made code more similiar
* fix?
* more tests
* more tests
* more tests
* add ahoCorasick test + limit 4GB stack size
* more tests
* fix test
* add additional test
* more tests
* more tests
* fix?
* better fix
* fix virtual tree
* more tests
* more tests
* recursive closest pair
* more tests
* decrease limit
* new tests
* more tests
* fix name
* more tests
* add test
* new test
* more tests
* more tests
* more tests
* more tests
* new test and content
* new code
* new code
* larger tests
* fix and test
* new test
* new test
* update pdf
* remove comments
* new test
* more tests
* more testcases
* more tests
* increased limit
* more tests
* more tests
* more tests
* new tests
* more tests
* shortened code
* new test
* add basic tests for bigint
* more tests
* removed old files
* new test
* ignore some files
* more auto more ccw
* fix test
* more tests
* fix
* new tests
* more tests
* more tests
* stronger test
* actually verify delaunay...
* more tests
* fix header
* more tests
* run tests parallel?
* test parralel?
* add --missing
* separate workflows
* test
* is the pdf checked?
* separate workflows
* fix workflow
* more workflows

---------

Co-authored-by: Yidi
---
 content/graph/2sat.cpp | 31 +++
 content/graph/LCA_sparse.cpp | 32 ++++
 content/graph/TSP.cpp | 29 +++
 content/graph/articulationPoints.cpp | 43 +++++
 content/graph/bellmannFord.cpp | 19 ++
 content/graph/bitonicTSP.cpp | 31 +++
 content/graph/bitonicTSPsimple.cpp | 27 +++
 content/graph/blossom.cpp | 82 ++++++++
 content/graph/bronKerbosch.cpp | 24 +++
 content/graph/centroid.cpp | 21 +++
 content/graph/connect.cpp | 31 +++
 content/graph/cycleCounting.cpp | 64 +++++++
 content/graph/dfs.tex | 16 ++
 content/graph/dijkstra.cpp | 21 +++
 content/graph/dinicScaling.cpp | 51 +++++
 content/graph/euler.cpp | 23 +++
 content/graph/floydWarshall.cpp | 27 +++
 content/graph/graph.tex | 269 +++++++++++++++++++++++++++
 content/graph/havelHakimi.cpp | 18 ++
 content/graph/hld.cpp | 44 +++++
 content/graph/hopcroftKarp.cpp | 47 +++++
 content/graph/kruskal.cpp | 9 +
 content/graph/matching.cpp | 23 +++
 content/graph/maxCarBiMatch.cpp | 25 +++
 content/graph/maxWeightBipartiteMatching.cpp | 50 +++++
 content/graph/minCostMaxFlow.cpp | 66 +++++++
 content/graph/pushRelabel.cpp | 64 +++++++
 content/graph/reroot.cpp | 62 ++++++
 content/graph/scc.cpp | 32 ++++
 content/graph/stoerWagner.cpp | 53 ++++++
 content/graph/treeIsomorphism.cpp | 15 ++
 content/graph/virtualTree.cpp | 22 +++
 32 files changed, 1371 insertions(+)
 create mode 100644 content/graph/2sat.cpp
 create mode 100644 content/graph/LCA_sparse.cpp
 create mode 100644 content/graph/TSP.cpp
 create mode 100644 content/graph/articulationPoints.cpp
 create mode 100644 content/graph/bellmannFord.cpp
 create mode 100644 content/graph/bitonicTSP.cpp
 create mode 100644 content/graph/bitonicTSPsimple.cpp
 create mode 100644 content/graph/blossom.cpp
 create mode 100644 content/graph/bronKerbosch.cpp
 create mode 100644 content/graph/centroid.cpp
 create mode 100644 content/graph/connect.cpp
 create mode 100644 content/graph/cycleCounting.cpp
 create mode 100644 content/graph/dfs.tex
 create mode 100644 content/graph/dijkstra.cpp
 create mode 100644 content/graph/dinicScaling.cpp
 create mode 100644 content/graph/euler.cpp
 create mode 100644 content/graph/floydWarshall.cpp
 create mode 100644 content/graph/graph.tex
 create mode 100644 content/graph/havelHakimi.cpp
 create mode 100644 content/graph/hld.cpp
 create mode 100644 content/graph/hopcroftKarp.cpp
 create mode 100644 content/graph/kruskal.cpp
 create mode 100644 content/graph/matching.cpp
 create mode 100644 content/graph/maxCarBiMatch.cpp
 create mode 100644 content/graph/maxWeightBipartiteMatching.cpp
 create mode 100644 content/graph/minCostMaxFlow.cpp
 create mode 100644 content/graph/pushRelabel.cpp
 create mode 100644 content/graph/reroot.cpp
 create mode 100644 content/graph/scc.cpp
 create mode 100644 content/graph/stoerWagner.cpp
 create mode 100644 content/graph/treeIsomorphism.cpp
 create mode 100644 content/graph/virtualTree.cpp

(limited to 'content/graph')

diff --git a/content/graph/2sat.cpp b/content/graph/2sat.cpp
new file mode 100644
index 0000000..75e54e6
--- /dev/null
+++ b/content/graph/2sat.cpp
@@ -0,0 +1,31 @@
+struct sat2 {
+  int n; // + scc variablen
+  vector sol;
+
+  sat2(int vars) : n(vars*2), adj(n) {}
+
+  static int var(int i) {return i << 1;} // use this!
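  // Hypothetical usage sketch (literal encoding): var(i) is "x_i is true",
  // var(i)^1 is its negation.
  //   sat2 s(2);                                 // variables x0, x1
  //   s.addOr(sat2::var(0), sat2::var(1) ^ 1);   // clause (x0 or not x1)
  //   s.addTrue(sat2::var(1));                   // force x1 = true
  //   if (s.solve()) { bool x0 = s.sol[sat2::var(0)]; }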
+ + void addImpl(int a, int b) { + adj[a].push_back(b); + adj[1^b].push_back(1^a); + } + void addEquiv(int a, int b) {addImpl(a, b); addImpl(b, a);} + void addOr(int a, int b) {addImpl(1^a, b);} + void addXor(int a, int b) {addOr(a, b); addOr(1^a, 1^b);} + void addTrue(int a) {addImpl(1^a, a);} + void addFalse(int a) {addTrue(1^a);} + void addAnd(int a, int b) {addTrue(a); addTrue(b);} + void addNand(int a, int b) {addOr(1^a, 1^b);} + + bool solve() { + scc(); //scc code von oben + sol.assign(n, -1); + for (int i = 0; i < n; i += 2) { + if (idx[i] == idx[i + 1]) return false; + sol[i] = idx[i] < idx[i + 1]; + sol[i + 1] = !sol[i]; + } + return true; + } +}; diff --git a/content/graph/LCA_sparse.cpp b/content/graph/LCA_sparse.cpp new file mode 100644 index 0000000..221b5ed --- /dev/null +++ b/content/graph/LCA_sparse.cpp @@ -0,0 +1,32 @@ +struct LCA { + vector depth; + vector visited, first; + int idx; + SparseTable st; //sparse table @\sourceref{datastructures/sparseTable.cpp}@ + + void init(vector>& adj, int root) { + depth.assign(2 * sz(adj), 0); + visited.assign(2 * sz(adj), -1); + first.assign(sz(adj), 2 * sz(adj)); + idx = 0; + dfs(adj, root); + st.init(&depth); + } + + void dfs(vector>& adj, int v, ll d=0) { + visited[idx] = v, depth[idx] = d; + first[v] = min(idx, first[v]), idx++; + + for (int u : adj[v]) { + if (first[u] == 2 * sz(adj)) { + dfs(adj, u, d + 1); + visited[idx] = v, depth[idx] = d, idx++; + }}} + + int getLCA(int u, int v) { + if (first[u] > first[v]) swap(u, v); + return visited[st.queryIdempotent(first[u], first[v] + 1)]; + } + + ll getDepth(int v) {return depth[first[v]];} +}; diff --git a/content/graph/TSP.cpp b/content/graph/TSP.cpp new file mode 100644 index 0000000..6223858 --- /dev/null +++ b/content/graph/TSP.cpp @@ -0,0 +1,29 @@ +vector> dist; // Entfernung zwischen je zwei Punkten. + +auto TSP() { + int n = sz(dist), m = 1 << n; + vector> dp(n, vector(m, edge{INF, -1})); + + for (int c = 0; c < n; c++) + dp[c][m-1].dist = dist[c][0], dp[c][m-1].to = 0; + + for (int v = m - 2; v >= 0; v--) { + for (int c = n - 1; c >= 0; c--) { + for (int g = 0; g < n; g++) { + if (g != c && !((1 << g) & v)) { + if ((dp[g][(v | (1 << g))].dist + dist[c][g]) < + dp[c][v].dist) { + dp[c][v].dist = + dp[g][(v | (1 << g))].dist + dist[c][g]; + dp[c][v].to = g; + }}}}} + // return dp[0][1]; // Länge der Tour + + vector tour = {0}; + int v = 0; + while (tour.back() != 0 || sz(tour) == 1) + tour.push_back(dp[tour.back()] + [(v |= (1 << tour.back()))].to); + // Enthält Knoten 0 zweimal. An erster und letzter Position. 
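  // Note: the DP above assumes a small helper type that is not part of this
  // snippet, roughly: struct edge { ll dist; int to; };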
+ return tour; +} diff --git a/content/graph/articulationPoints.cpp b/content/graph/articulationPoints.cpp new file mode 100644 index 0000000..25ff67e --- /dev/null +++ b/content/graph/articulationPoints.cpp @@ -0,0 +1,43 @@ +vector> adj; +vector num; +int counter, rootCount, root; +vector isArt; +vector bridges, st; +vector> bcc; + +int dfs(int v, int from = -1) { + int me = num[v] = ++counter, top = me; + for (Edge& e : adj[v]) { + if (e.id == from) continue; + if (num[e.to]) { + top = min(top, num[e.to]); + if (num[e.to] < me) st.push_back(e); + } else { + if (v == root) rootCount++; + int si = sz(st); + int up = dfs(e.to, e.id); + top = min(top, up); + if (up >= me) isArt[v] = true; + if (up > me) bridges.push_back(e); + if (up <= me) st.push_back(e); + if (up == me) { + bcc.emplace_back(si + all(st)); + st.resize(si); + }}} + return top; +} + +void find() { + counter = 0; + num.assign(sz(adj), 0); + isArt.assign(sz(adj), false); + bridges.clear(); + st.clear(); + bcc.clear(); + for (int v = 0; v < sz(adj); v++) { + if (!num[v]) { + root = v; + rootCount = 0; + dfs(v); + isArt[v] = rootCount > 1; +}}} diff --git a/content/graph/bellmannFord.cpp b/content/graph/bellmannFord.cpp new file mode 100644 index 0000000..09ea1aa --- /dev/null +++ b/content/graph/bellmannFord.cpp @@ -0,0 +1,19 @@ +auto bellmannFord(int n, vector& edges, int start) { + vector dist(n, INF), prev(n, -1); + dist[start] = 0; + + for (int i = 1; i < n; i++) { + for (edge& e : edges) { + if (dist[e.from] != INF && + dist[e.from] + e.cost < dist[e.to]) { + dist[e.to] = dist[e.from] + e.cost; + prev[e.to] = e.from; + }}} + + for (edge& e : edges) { + if (dist[e.from] != INF && + dist[e.from] + e.cost < dist[e.to]) { + // Negativer Kreis gefunden. + }} + return dist; //return prev? +} diff --git a/content/graph/bitonicTSP.cpp b/content/graph/bitonicTSP.cpp new file mode 100644 index 0000000..6470232 --- /dev/null +++ b/content/graph/bitonicTSP.cpp @@ -0,0 +1,31 @@ +vector> dist; // Initialisiere mit Entfernungen zwischen Punkten. + +auto bitonicTSP() { + vector dp(sz(dist), HUGE_VAL); + vector pre(sz(dist)); // nur für Tour + dp[0] = 0; dp[1] = 2 * dist[0][1]; pre[1] = 0; + for (unsigned int i = 2; i < sz(dist); i++) { + double link = 0; + for (int j = i - 2; j >= 0; j--) { + link += dist[j + 1][j + 2]; + double opt = link + dist[j][i] + dp[j + 1] - dist[j][j + 1]; + if (opt < dp[i]) { + dp[i] = opt; + pre[i] = j; + }}} + // return dp.back(); // Länger der Tour + + int j, n = sz(dist) - 1; + vector ut, lt = {n, n - 1}; + do { + j = pre[n]; + (lt.back() == n ? lt : ut).push_back(j); + for (int i = n - 1; i > j + 1; i--) { + (lt.back() == i ? lt : ut).push_back(i - 1); + } + } while(n = j + 1, j > 0); + (lt.back() == 1 ? lt : ut).push_back(0); + reverse(all(lt)); + lt.insert(lt.end(), all(ut)); + return lt;// Enthält Knoten 0 zweimal. An erster und letzter Position. +} diff --git a/content/graph/bitonicTSPsimple.cpp b/content/graph/bitonicTSPsimple.cpp new file mode 100644 index 0000000..8b6e6c5 --- /dev/null +++ b/content/graph/bitonicTSPsimple.cpp @@ -0,0 +1,27 @@ +vector> dist; // Entfernungen zwischen Punkten. 
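// Precondition (as for any bitonic tour DP): the points behind dist must be
// sorted by x-coordinate, so index 0 is the leftmost and index n-1 the
// rightmost point.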
+vector> dp; + +double get(int p1, int p2) { + int v = max(p1, p2) + 1; + if (v == sz(dist)) return dist[p1][v - 1] + dist[p2][v - 1]; + if (dp[p1][p2] >= 0.0) return dp[p1][p2]; + double tryLR = dist[p1][v] + get(v, p2); + double tryRL = dist[p2][v] + get(p1, v); + return dp[p1][p2] = min(tryLR, tryRL); +} + +auto bitonicTSP() { + dp = vector>(sz(dist), + vector(sz(dist), -1)); + get(0, 0); + // return dp[0][0]; // Länger der Tour + vector lr = {0}, rl = {0}; + for (int p1 = 0, p2 = 0, v; (v = max(p1, p2)+1) < sz(dist);) { + if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) { + lr.push_back(v); p1 = v; + } else { + rl.push_back(v); p2 = v; + }} + lr.insert(lr.end(), rl.rbegin(), rl.rend()); + return lr;// Enthält Knoten 0 zweimal. An erster und letzter Position. +} diff --git a/content/graph/blossom.cpp b/content/graph/blossom.cpp new file mode 100644 index 0000000..7bd494a --- /dev/null +++ b/content/graph/blossom.cpp @@ -0,0 +1,82 @@ +struct GM { + vector> adj; + // pairs ist der gematchte knoten oder n + vector pairs, first, que; + vector> label; + int head, tail; + + GM(int n) : adj(n), pairs(n + 1, n), first(n + 1, n), + que(n), label(n + 1, {-1, -1}) {} + + void rematch(int u, int v) { + int t = pairs[u]; pairs[u] = v; + if (pairs[t] != u) return; + if (label[u].second == -1) { + pairs[t] = label[u].first; + rematch(pairs[t], t); + } else { + auto [x, y] = label[u]; + rematch(x, y); + rematch(y, x); + }} + + int findFirst(int v) { + return label[first[v]].first < 0 ? first[v] + : first[v] = findFirst(first[v]); + } + + void relabel(int x, int y) { + int r = findFirst(x); + int s = findFirst(y); + if (r == s) return; + auto h = label[r] = label[s] = {~x, y}; + int join; + while (true) { + if (s != sz(adj)) swap(r, s); + r = findFirst(label[pairs[r]].first); + if (label[r] == h) { + join = r; + break; + } else { + label[r] = h; + }} + for (int v : {first[x], first[y]}) { + for (; v != join; v = first[label[pairs[v]].first]) { + label[v] = {x, y}; + first[v] = join; + que[tail++] = v; + }}} + + bool augment(int v) { + label[v] = {sz(adj), -1}; + first[v] = sz(adj); + head = tail = 0; + for (que[tail++] = v; head < tail;) { + int x = que[head++]; + for (int y : adj[x]) { + if (pairs[y] == sz(adj) && y != v) { + pairs[y] = x; + rematch(x, y); + return true; + } else if (label[y].first >= 0) { + relabel(x, y); + } else if (label[pairs[y]].first == -1) { + label[pairs[y]].first = x; + first[pairs[y]] = y; + que[tail++] = pairs[y]; + }}} + return false; + } + + int match() { + int matching = head = tail = 0; + for (int v = 0; v < sz(adj); v++) { + if (pairs[v] < sz(adj) || !augment(v)) continue; + matching++; + for (int i = 0; i < tail; i++) + label[que[i]] = label[pairs[que[i]]] = {-1, -1}; + label[sz(adj)] = {-1, -1}; + } + return matching; + } +}; diff --git a/content/graph/bronKerbosch.cpp b/content/graph/bronKerbosch.cpp new file mode 100644 index 0000000..0cfcc5f --- /dev/null +++ b/content/graph/bronKerbosch.cpp @@ -0,0 +1,24 @@ +using bits = bitset<64>; +vector adj, cliques; + +void addEdge(int a, int b) { + if (a != b) adj[a][b] = adj[b][a] = 1; +} + +void bronKerboschRec(bits R, bits P, bits X) { + if (P.none() && X.none()) { + cliques.push_back(R); + } else { + int q = min(P._Find_first(), X._Find_first()); + bits cands = P & ~adj[q]; + for (int i = 0; i < sz(adj); i++) if (cands[i]) { + R[i] = 1; + bronKerboschRec(R, P & adj[i], X & adj[i]); + R[i] = P[i] = 0; + X[i] = 1; +}}} + +void bronKerbosch() { + cliques.clear(); + bronKerboschRec({}, {(1ull << sz(adj)) - 1}, {}); +} diff --git 
a/content/graph/centroid.cpp b/content/graph/centroid.cpp new file mode 100644 index 0000000..820945b --- /dev/null +++ b/content/graph/centroid.cpp @@ -0,0 +1,21 @@ +vector s; +void dfs_sz(int v, int from = -1) { + s[v] = 1; + for (int u : adj[v]) if (u != from) { + dfs_sz(u, v); + s[v] += s[u]; +}} + +pair dfs_cent(int v, int from, int n) { + for (int u : adj[v]) if (u != from) { + if (2 * s[u] == n) return {v, u}; + if (2 * s[u] > n) return dfs_cent(u, v, n); + } + return {v, -1}; +} + +pair find_centroid(int root = 0) { + s.resize(sz(adj)); + dfs_sz(root); + return dfs_cent(root, -1, s[root]); +} diff --git a/content/graph/connect.cpp b/content/graph/connect.cpp new file mode 100644 index 0000000..ffcd6c2 --- /dev/null +++ b/content/graph/connect.cpp @@ -0,0 +1,31 @@ +struct connect { + int n; + vector> edges; + LCT lct; // min LCT @\sourceref{datastructures/LCT.cpp}@, no updates required + + connect(int n, int m) : n(n), edges(m), lct(n+m) {} + + bool connected(int u, int v) { + return lct.connected(&lct.nodes[u], &lct.nodes[v]); + } + + void addEdge(int u, int v, int id) { + lct.nodes[id + n] = LCT::Node(id + n, id + n); + edges[id] = {u, v}; + if (connected(u, v)) { + int old = lct.query(&lct.nodes[u], &lct.nodes[v]); + if (old < id) eraseEdge(old); + } + if (!connected(u, v)) { + lct.link(&lct.nodes[u], &lct.nodes[id + n]); + lct.link(&lct.nodes[v], &lct.nodes[id + n]); + }} + + void eraseEdge(ll id) { + if (connected(edges[id].first, edges[id].second) && + lct.query(&lct.nodes[edges[id].first], + &lct.nodes[edges[id].second]) == id) { + lct.cut(&lct.nodes[edges[id].first], &lct.nodes[id + n]); + lct.cut(&lct.nodes[edges[id].second], &lct.nodes[id + n]); + }} +}; diff --git a/content/graph/cycleCounting.cpp b/content/graph/cycleCounting.cpp new file mode 100644 index 0000000..6a299ee --- /dev/null +++ b/content/graph/cycleCounting.cpp @@ -0,0 +1,64 @@ +constexpr int maxEdges = 128; +using cycle = bitset; +struct cycles { + vector>> adj; + vector seen; + vector paths, base; + vector> edges; + + cycles(int n) : adj(n), seen(n), paths(n) {} + + void addEdge(int u, int v) { + adj[u].push_back({v, sz(edges)}); + adj[v].push_back({u, sz(edges)}); + edges.push_back({u, v}); + } + + void addBase(cycle cur) { + for (cycle o : base) { + o ^= cur; + if (o._Find_first() > cur._Find_first()) cur = o; + } + if (cur.any()) base.push_back(cur); + } + + void findBase(int v, int from = -1, cycle cur = {}) { + if (from < 0 && seen[v]) return; + if (seen[v]) { + addBase(cur ^ paths[v]); + } else { + seen[v] = true; + paths[v] = cur; + for (auto [u, id] : adj[v]) { + if (u == from) continue; + cur[id].flip(); + findBase(u, v, cur); + cur[id].flip(); + }}} + + bool isCycle(cycle cur) {//cycle must be constrcuted from base + if (cur.none()) return false; + init(sz(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@ + for (int i = 0; i < sz(edges); i++) { + if (cur[i]) { + cur[i] = false; + if (findSet(edges[i].first) == + findSet(edges[i].second)) break; + unionSets(edges[i].first, edges[i].second); + }} + return cur.none(); + } + + int count() { + for (int i = 0; i < sz(adj); i++) findBase(i); + assert(sz(base) < 30); + int res = 0; + for (int i = 1; i < (1 << sz(base)); i++) { + cycle cur; + for (int j = 0; j < sz(base); j++) + if (((i >> j) & 1) != 0) cur ^= base[j]; + if (isCycle(cur)) res++; + } + return res; + } +}; diff --git a/content/graph/dfs.tex b/content/graph/dfs.tex new file mode 100644 index 0000000..1e6705f --- /dev/null +++ b/content/graph/dfs.tex @@ -0,0 +1,16 @@ 
+\begin{expandtable} +\begin{tabularx}{\linewidth}{|X|XIXIX|} + \hline + Kantentyp $(v, w)$ & \code{dfs[v] < dfs[w]} & \code{fin[v] > fin[w]} & \code{seen[w]} \\ + %$(v, w)$ & \code{dfs[w]} & \code{fin[w]} & \\ + \hline + in-tree & \code{true} & \code{true} & \code{false} \\ + \grayhline + forward & \code{true} & \code{true} & \code{true} \\ + \grayhline + backward & \code{false} & \code{false} & \code{true} \\ + \grayhline + cross & \code{false} & \code{true} & \code{true} \\ + \hline +\end{tabularx} +\end{expandtable} diff --git a/content/graph/dijkstra.cpp b/content/graph/dijkstra.cpp new file mode 100644 index 0000000..61c636d --- /dev/null +++ b/content/graph/dijkstra.cpp @@ -0,0 +1,21 @@ +using path = pair; //dist, destination + +auto dijkstra(const vector>& adj, int start) { + priority_queue, greater> pq; + vector dist(sz(adj), INF); + vector prev(sz(adj), -1); + dist[start] = 0; pq.emplace(0, start); + + while (!pq.empty()) { + auto [dv, v] = pq.top(); pq.pop(); + if (dv > dist[v]) continue; // WICHTIG! + + for (auto [du, u] : adj[v]) { + ll newDist = dv + du; + if (newDist < dist[u]) { + dist[u] = newDist; + prev[u] = v; + pq.emplace(dist[u], u); + }}} + return dist; //return prev; +} diff --git a/content/graph/dinicScaling.cpp b/content/graph/dinicScaling.cpp new file mode 100644 index 0000000..f4e833a --- /dev/null +++ b/content/graph/dinicScaling.cpp @@ -0,0 +1,51 @@ +struct Edge { + int to, rev; + ll f, c; +}; + +vector> adj; +int s, t; +vector pt, dist; + +void addEdge(int u, int v, ll c) { + adj[u].push_back({v, (int)sz(adj[v]), 0, c}); + adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0}); +} + +bool bfs(ll lim) { + dist.assign(sz(adj), -1); + dist[s] = 0; + queue q({s}); + while (!q.empty() && dist[t] < 0) { + int v = q.front(); q.pop(); + for (Edge& e : adj[v]) { + if (dist[e.to] < 0 && e.c - e.f >= lim) { + dist[e.to] = dist[v] + 1; + q.push(e.to); + }}} + return dist[t] >= 0; +} + +bool dfs(int v, ll flow) { + if (v == t) return true; + for (; pt[v] < sz(adj[v]); pt[v]++) { + Edge& e = adj[v][pt[v]]; + if (dist[e.to] != dist[v] + 1) continue; + if (e.c - e.f >= flow && dfs(e.to, flow)) { + e.f += flow; + adj[e.to][e.rev].f -= flow; + return true; + }} + return false; +} + +ll maxFlow(int source, int target) { + s = source, t = target; + ll flow = 0; + for (ll lim = (1LL << 62); lim >= 1; lim /= 2) { + while (bfs(lim)) { + pt.assign(sz(adj), 0); + while (dfs(s, lim)) flow += lim; + }} + return flow; +} diff --git a/content/graph/euler.cpp b/content/graph/euler.cpp new file mode 100644 index 0000000..a5ea192 --- /dev/null +++ b/content/graph/euler.cpp @@ -0,0 +1,23 @@ +vector> idx; +vector to, validIdx, cycle; +vector used; + +void addEdge(int u, int v) { + idx[u].push_back(sz(to)); + to.push_back(v); + used.push_back(false); + idx[v].push_back(sz(to)); // für ungerichtet + to.push_back(u); + used.push_back(false); +} + +void euler(int v) { // init idx und validIdx + for (;validIdx[v] < sz(idx[v]); validIdx[v]++) { + if (!used[idx[v][validIdx[v]]]) { + int u = to[idx[v][validIdx[v]]]; + used[idx[v][validIdx[v]]] = true; + used[idx[v][validIdx[v]] ^ 1] = true; // für ungerichtet + euler(u); + }} + cycle.push_back(v); // Zyklus in umgekehrter Reihenfolge. +} diff --git a/content/graph/floydWarshall.cpp b/content/graph/floydWarshall.cpp new file mode 100644 index 0000000..df096c2 --- /dev/null +++ b/content/graph/floydWarshall.cpp @@ -0,0 +1,27 @@ +vector> dist; // Entfernung zwischen je zwei Punkten. 
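// Expected initialization (see the Floyd-Warshall note in graph.tex):
// dist[i][i] = 0, dist[i][j] = edge weight, or INF if there is no edge.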
+vector> next; + +void floydWarshall() { + next.assign(sz(dist), vector(sz(dist), -1)); + for (int i = 0; i < sz(dist); i++) { + for (int j = 0; j < sz(dist); j++) { + if (dist[i][j] < INF) { + next[i][j] = j; + }}} + + for (int k = 0; k < sz(dist); k++) { + for (int i = 0; i < sz(dist); i++) { + for (int j = 0; j < sz(dist); j++) { + // only needed if dist can be negative + if (dist[i][k] == INF || dist[k][j] == INF) continue; + if (dist[i][j] > dist[i][k] + dist[k][j]) { + dist[i][j] = dist[i][k] + dist[k][j]; + next[i][j] = next[i][k]; +}}}}} + +vector getPath(int u, int v) { + if (next[u][v] < 0) return {}; + vector path = {u}; + while (u != v) path.push_back(u = next[u][v]); + return path; //Pfad u -> v +} diff --git a/content/graph/graph.tex b/content/graph/graph.tex new file mode 100644 index 0000000..831f4e5 --- /dev/null +++ b/content/graph/graph.tex @@ -0,0 +1,269 @@ +\section{Graphen} + +\begin{algorithm}{Kruskal} + \begin{methods}[ll] + berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ + \end{methods} + \sourcecode{graph/kruskal.cpp} +\end{algorithm} + +\begin{algorithm}{Minimale Spannbäume} + \paragraph{Schnitteigenschaft} + Für jeden Schnitt $C$ im Graphen gilt: + Gibt es eine Kante $e$, die echt leichter ist als alle anderen Schnittkanten, so gehört diese zu allen minimalen Spannbäumen. + ($\Rightarrow$ Die leichteste Kante in einem Schnitt kann in einem minimalen Spannbaum verwendet werden.) + + \paragraph{Kreiseigenschaft} + Für jeden Kreis $K$ im Graphen gilt: + Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums. +\end{algorithm} + +\begin{algorithm}{Heavy-Light Decomposition} + \begin{methods} + \method{get\_intervals}{gibt Zerlegung des Pfades von $u$ nach $v$}{\log(\abs{V})} + \end{methods} + \textbf{Wichtig:} Intervalle sind halboffen + + Subbaum unter dem Knoten $v$ ist das Intervall $[\text{\code{in[v]}},~\text{\code{out[v]}})$. + \sourcecode{graph/hld.cpp} +\end{algorithm} + +\begin{algorithm}{Lowest Common Ancestor} + \begin{methods} + \method{init}{baut DFS-Baum über $g$ auf}{\abs{V}\*\log(\abs{V})} + \method{getLCA}{findet LCA}{1} + \method{getDepth}{berechnet Distanz zur Wurzel im DFS-Baum}{1} + \end{methods} + \sourcecode{graph/LCA_sparse.cpp} +\end{algorithm} + +\begin{algorithm}{Centroids} + \begin{methods} + \method{find\_centroid}{findet alle Centroids des Baums (maximal 2)}{\abs{V}} + \end{methods} + \sourcecode{graph/centroid.cpp} +\end{algorithm} + +\begin{algorithm}{Eulertouren} + \begin{methods} + \method{euler}{berechnet den Kreis}{\abs{V}+\abs{E}} + \end{methods} + \sourcecode{graph/euler.cpp} + \begin{itemize} + \item Zyklus existiert, wenn jeder Knoten geraden Grad hat (ungerichtet),\\ bei jedem Knoten Ein- und Ausgangsgrad übereinstimmen (gerichtet). + \item Pfad existiert, wenn genau $\{0, 2\}$ Knoten ungeraden Grad haben (ungerichtet),\\ bei allen Knoten Ein- und Ausgangsgrad übereinstimmen oder einer eine Ausgangskante mehr hat (Startknoten) und einer eine Eingangskante mehr hat (Endknoten). + \item \textbf{Je nach Aufgabenstellung überprüfen, wie ein unzusammenhängender Graph interpretiert werden sollen.} + \item Wenn eine bestimmte Sortierung verlangt wird oder Laufzeit vernachlässigbar ist, ist eine Implementierung mit einem \code{vector> adj} leichter + \item \textbf{Wichtig:} Algorithmus schlägt nicht fehl, falls kein Eulerzyklus existiert. + Die Existenz muss separat geprüft werden. 
+ \end{itemize} +\end{algorithm} + +\begin{algorithm}{Baum-Isomorphie} + \begin{methods} + \method{treeLabel}{berechnet kanonischen Namen für einen Baum}{\abs{V}\*\log(\abs{V})} + \end{methods} + \sourcecode{graph/treeIsomorphism.cpp} +\end{algorithm} + +\subsection{Kürzeste Wege} + +\subsubsection{\textsc{Bellmann-Ford}-Algorithmus} +\method{bellmanFord}{kürzeste Pfade oder negative Kreise finden}{\abs{V}\*\abs{E}} +\sourcecode{graph/bellmannFord.cpp} + +\subsubsection{Algorithmus von \textsc{Dijkstra}} +\method{dijkstra}{kürzeste Pfade in Graphen ohne negative Kanten}{\abs{E}\*\log(\abs{V})} +\sourcecode{graph/dijkstra.cpp} + +\subsubsection{\textsc{Floyd-Warshall}-Algorithmus} +\method{floydWarshall}{kürzeste Pfade oder negative Kreise finden}{\abs{V}^3} +\begin{itemize} + \item \code{dist[i][i] = 0, dist[i][j] = edge\{j, j\}.weight} oder \code{INF} + \item \code{i} liegt auf einem negativen Kreis $\Leftrightarrow$ \code{dist[i][i] < 0}. +\end{itemize} +\sourcecode{graph/floydWarshall.cpp} + +\subsubsection{Matrix-Algorithmus} +Sei $d_{i\smash{j}}$ die Distanzmatrix von $G$, dann gibt $d_{i\smash{j}}^k$ die kürzeste Distanz von $i$ nach $j$ mit maximal $k$ kanten an mit der Verknüpfung: $c_{i\smash{j}} = a_{i\smash{j}} \otimes b_{i\smash{j}} = \min\{a_{ik} \cdot b_{k\smash{j}}\}$ + + +Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, dann gibt $a_{i\smash{j}}^k$ die Anzahl der Wege von $i$ nach $j$ mit Länge genau \textcolor{gray}{(maximal)} $k$ an mit der Verknüpfung: $c_{i\smash{j}} = a_{i\smash{j}} \otimes b_{i\smash{j}} = \sum a_{ik} \cdot b_{k\smash{j}}$ + +\begin{algorithm}{Dynamic Connectivity} + \begin{methods} + \method{Constructor}{erzeugt Baum ($n$ Knoten, $m$ updates)}{n+m} + \method{addEdge}{fügt Kannte ein,\code{id}=delete Zeitpunkt}{\log(n)} + \method{eraseEdge}{entfernt Kante \code{id}}{\log(n)} + \end{methods} + \sourcecode{graph/connect.cpp} +\end{algorithm} + +\begin{algorithm}{Erd\H{o}s-Gallai} + Sei $d_1 \geq \cdots \geq d_{n}$. Es existiert genau dann ein Graph $G$ mit Degreesequence $d$ falls $\sum\limits_{i=1}^{n} d_i$ gerade ist und für $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$ + \begin{methods} + \method{havelHakimi}{findet Graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})} + \end{methods} + \sourcecode{graph/havelHakimi.cpp} +\end{algorithm} + +\begin{algorithm}{Strongly Connected Components (\textsc{Tarjan})} + \begin{methods} + \method{scc}{berechnet starke Zusammenhangskomponenten}{\abs{V}+\abs{E}} + \end{methods} + \sourcecode{graph/scc.cpp} +\end{algorithm} + +\begin{algorithm}{DFS} + \input{graph/dfs} +\end{algorithm} + +\begin{algorithm}{Artikulationspunkte, Brücken und BCC} + \begin{methods} + \method{find}{berechnet Artikulationspunkte, Brücken und BCC}{\abs{V}+\abs{E}} + \end{methods} + \textbf{Wichtig:} isolierte Knoten und Brücken sind keine BCC. 
+ \sourcecode{graph/articulationPoints.cpp} +\end{algorithm} +\vfill\null\columnbreak + +\begin{algorithm}{2-SAT} + \sourcecode{graph/2sat.cpp} +\end{algorithm} + +\begin{algorithm}{Maximal Cliques} + \begin{methods} + \method{bronKerbosch}{berechnet alle maximalen Cliquen}{3^\frac{n}{3}} + \method{addEdge}{fügt \textbf{ungerichtete} Kante ein}{1} + \end{methods} + \sourcecode{graph/bronKerbosch.cpp} +\end{algorithm} + +\begin{algorithm}{Cycle Counting} + \begin{methods} + \method{findBase}{berechnet Basis}{\abs{V}\cdot\abs{E}} + \method{count}{zählt Zykel}{2^{\abs{\mathit{base}}}} + \end{methods} + \begin{itemize} + \item jeder Zyklus ist das xor von einträgen in \code{base}. + \end{itemize} + \sourcecode{graph/cycleCounting.cpp} +\end{algorithm} + +\begin{algorithm}{Wert des maximalen Matchings} + Fehlerwahrscheinlichkeit: $\left(\frac{m}{MOD}\right)^I$ + \sourcecode{graph/matching.cpp} +\end{algorithm} + +\begin{algorithm}{Allgemeines maximales Matching} + \begin{methods} + \method{match}{berechnet algemeines Matching}{\abs{E}\*\abs{V}\*\log(\abs{V})} + \end{methods} + \sourcecode{graph/blossom.cpp} +\end{algorithm} + +\begin{algorithm}{Rerooting Template} + \sourcecode{graph/reroot.cpp} +\end{algorithm} + +\begin{algorithm}{Virtual Trees} + \sourcecode{graph/virtualTree.cpp} +\end{algorithm} + +\begin{algorithm}{Maximum Cardinatlity Bipartite Matching} + \label{kuhn} + \begin{methods} + \method{kuhn}{berechnet Matching}{\abs{V}\*\min(ans^2, \abs{E})} + \end{methods} + \begin{itemize} + \item die ersten [0..l) Knoten in \code{adj} sind die linke Seite des Graphen + \end{itemize} + \sourcecode{graph/maxCarBiMatch.cpp} + \begin{methods} + \method{hopcroft\_karp}{berechnet Matching}{\sqrt{\abs{V}}\*\abs{E}} + \end{methods} + \sourcecode{graph/hopcroftKarp.cpp} +\end{algorithm} + +\begin{algorithm}{Global Mincut} + \begin{methods} + \method{stoer\_wagner}{berechnet globalen Mincut}{\abs{V}\abs{E}+\abs{V}^2\*\log(\abs{E})} + \method{merge(a,b)}{merged Knoten $b$ in Knoten $a$}{\abs{E}} + \end{methods} + \textbf{Tipp:} Cut Rekonstruktion mit \code{unionFind} für Partitionierung oder \code{vector} für edge id's im cut. + \sourcecode{graph/stoerWagner.cpp} +\end{algorithm} + +\subsection{Max-Flow} + +\optional{ +\subsubsection{Push Relabel} +\begin{methods} + \method{maxFlow}{gut bei sehr dicht besetzten Graphen.}{\abs{V}^2\*\sqrt{\abs{E}}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/pushRelabel.cpp} +} + +\begin{algorithm}{Min-Cost-Max-Flow} + \begin{methods} + \method{mincostflow}{berechnet Fluss}{\abs{V}^2\cdot\abs{E}^2} + \end{methods} + \sourcecode{graph/minCostMaxFlow.cpp} +\end{algorithm} + +\subsubsection{Dinic's Algorithm mit Capacity Scaling} +\begin{methods} + \method{maxFlow}{doppelt so schnell wie Ford Fulkerson}{\abs{V}^2\cdot\abs{E}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/dinicScaling.cpp} +\vfill\null +\columnbreak + +\optional{ +\subsubsection{Anwendungen} +\begin{itemize} + \item \textbf{Maximum Edge Disjoint Paths}\newline + Finde die maximale Anzahl Pfade von $s$ nach $t$, die keine Kante teilen. + \begin{enumerate} + \item Setze $s$ als Quelle, $t$ als Senke und die Kapazität jeder Kante auf 1. + \item Der maximale Fluss entspricht den unterschiedlichen Pfaden ohne gemeinsame Kanten. + \end{enumerate} + \item \textbf{Maximum Independent Paths}\newline + Finde die maximale Anzahl an Pfaden von $s$ nach $t$, die keinen Knoten teilen. 
+ \begin{enumerate} + \item Setze $s$ als Quelle, $t$ als Senke und die Kapazität jeder Kante \emph{und jedes Knotens} auf 1. + \item Der maximale Fluss entspricht den unterschiedlichen Pfaden ohne gemeinsame Knoten. + \end{enumerate} + \item \textbf{Min-Cut}\newline + Der maximale Fluss ist gleich dem minimalen Schnitt. + Bei Quelle $s$ und Senke $t$, partitioniere in $S$ und $T$. + Zu $S$ gehören alle Knoten, die im Residualgraphen von $s$ aus erreichbar sind (Rückwärtskanten beachten). +\end{itemize} +} + +\begin{algorithm}{Maximum Weight Bipartite Matching} + \begin{methods} + \method{match}{berechnet Matching}{\abs{V}^3} + \end{methods} + \sourcecode{graph/maxWeightBipartiteMatching.cpp} +\end{algorithm} +\vfill\null +\columnbreak + + +\begin{algorithm}[optional]{TSP} + \begin{methods} + \method{TSP}{berechnet eine Tour}{n^2\*2^n} + \end{methods} + \sourcecode{graph/TSP.cpp} +\end{algorithm} + +\begin{algorithm}[optional]{Bitonic TSP} + \begin{methods} + \method{bitonicTSP}{berechnet eine Bitonische Tour}{n^2} + \end{methods} + \sourcecode{graph/bitonicTSPsimple.cpp} +\end{algorithm} + diff --git a/content/graph/havelHakimi.cpp b/content/graph/havelHakimi.cpp new file mode 100644 index 0000000..ac4d67d --- /dev/null +++ b/content/graph/havelHakimi.cpp @@ -0,0 +1,18 @@ +vector> havelHakimi(const vector& deg) { + priority_queue> pq; + for (int i = 0; i < sz(deg); i++) { + if (deg[i] > 0) pq.push({deg[i], i}); + } + vector> adj(sz(deg)); + while (!pq.empty()) { + auto [degV, v] = pq.top(); pq.pop(); + if (sz(pq) < degV) return {}; //impossible + vector> todo(degV); + for (auto& e : todo) e = pq.top(), pq.pop(); + for (auto [degU, u] : todo) { + adj[v].push_back(u); + adj[u].push_back(v); + if (degU > 1) pq.push({degU - 1, u}); + }} + return adj; +} diff --git a/content/graph/hld.cpp b/content/graph/hld.cpp new file mode 100644 index 0000000..65d3f5c --- /dev/null +++ b/content/graph/hld.cpp @@ -0,0 +1,44 @@ +vector> adj; +vector sz, in, out, nxt, par; +int counter; + +void dfs_sz(int v = 0, int from = -1) { + for (auto& u : adj[v]) if (u != from) { + dfs_sz(u, v); + sz[v] += sz[u]; + if (adj[v][0] == from || sz[u] > sz[adj[v][0]]) { + swap(u, adj[v][0]); //changes adj! +}}} + +void dfs_hld(int v = 0, int from = -1) { + par[v] = from; + in[v] = counter++; + for (int u : adj[v]) if (u != from) { + nxt[u] = (u == adj[v][0]) ? 
nxt[v] : u; + dfs_hld(u, v); + } + out[v] = counter; +} + +void init(int root = 0) { + int n = sz(adj); + sz.assign(n, 1), nxt.assign(n, root), par.assign(n, -1); + in.resize(n), out.resize(n); + counter = 0; + dfs_sz(root); + dfs_hld(root); +} + +template +void for_intervals(int u, int v, F&& f) { + for (;; v = par[nxt[v]]) { + if (in[v] < in[u]) swap(u, v); + f(max(in[u], in[nxt[v]]), in[v] + 1); + if (in[nxt[v]] <= in[u]) return; +}} + +int get_lca(int u, int v) { + for (;; v = par[nxt[v]]) { + if (in[v] < in[u]) swap(u, v); + if (in[nxt[v]] <= in[u]) return u; +}} diff --git a/content/graph/hopcroftKarp.cpp b/content/graph/hopcroftKarp.cpp new file mode 100644 index 0000000..c1f5d1c --- /dev/null +++ b/content/graph/hopcroftKarp.cpp @@ -0,0 +1,47 @@ +vector> adj; +// pairs ist der gematchte Knoten oder -1 +vector pairs, dist, ptr; + +bool bfs(int l) { + queue q; + for(int v = 0; v < l; v++) { + if (pairs[v] < 0) {dist[v] = 0; q.push(v);} + else dist[v] = -1; + } + bool exist = false; + while(!q.empty()) { + int v = q.front(); q.pop(); + for (int u : adj[v]) { + if (pairs[u] < 0) {exist = true; continue;} + if (dist[pairs[u]] < 0) { + dist[pairs[u]] = dist[v] + 1; + q.push(pairs[u]); + }}} + return exist; +} + +bool dfs(int v) { + for (; ptr[v] < sz(adj[v]); ptr[v]++) { + int u = adj[v][ptr[v]]; + if (pairs[u] < 0 || + (dist[pairs[u]] > dist[v] && dfs(pairs[u]))) { + pairs[u] = v; pairs[v] = u; + return true; + }} + return false; +} + +int hopcroft_karp(int l) { // l = #Knoten links + int ans = 0; + pairs.assign(sz(adj), -1); + dist.resize(l); + // Greedy Matching, optionale Beschleunigung. + for (int v = 0; v < l; v++) for (int u : adj[v]) + if (pairs[u] < 0) {pairs[u] = v; pairs[v] = u; ans++; break;} + while(bfs(l)) { + ptr.assign(l, 0); + for(int v = 0; v < l; v++) { + if (pairs[v] < 0) ans += dfs(v); + }} + return ans; +} diff --git a/content/graph/kruskal.cpp b/content/graph/kruskal.cpp new file mode 100644 index 0000000..987d30b --- /dev/null +++ b/content/graph/kruskal.cpp @@ -0,0 +1,9 @@ +sort(all(edges)); +vector mst; +ll cost = 0; +for (Edge& e : edges) { + if (findSet(e.from) != findSet(e.to)) { + unionSets(e.from, e.to); + mst.push_back(e); + cost += e.cost; +}} diff --git a/content/graph/matching.cpp b/content/graph/matching.cpp new file mode 100644 index 0000000..dcaea8c --- /dev/null +++ b/content/graph/matching.cpp @@ -0,0 +1,23 @@ +constexpr int MOD=1'000'000'007, I=10; +vector> adj, mat; + +int max_matching() { + int ans = 0; + mat.assign(sz(adj), {}); + for (int _ = 0; _ < I; _++) { + for (int v = 0; v < sz(adj); v++) { + mat[v].assign(sz(adj), 0); + for (int u : adj[v]) { + if (u < v) { + mat[v][u] = rand() % (MOD - 1) + 1; + mat[u][v] = MOD - mat[v][u]; + }}} + gauss(sz(adj), MOD); //LGS @\sourceref{math/lgsFp.cpp}@ + int rank = 0; + for (auto& row : mat) { + if (*max_element(all(row)) != 0) rank++; + } + ans = max(ans, rank / 2); + } + return ans; +} diff --git a/content/graph/maxCarBiMatch.cpp b/content/graph/maxCarBiMatch.cpp new file mode 100644 index 0000000..e928387 --- /dev/null +++ b/content/graph/maxCarBiMatch.cpp @@ -0,0 +1,25 @@ +vector> adj; +vector pairs; // Der gematchte Knoten oder -1. +vector visited; + +bool dfs(int v) { + if (visited[v]) return false; + visited[v] = true; + for (int u : adj[v]) if (pairs[u] < 0 || dfs(pairs[u])) { + pairs[u] = v; pairs[v] = u; return true; + } + return false; +} + +int kuhn(int l) { // l = #Knoten links. + pairs.assign(sz(adj), -1); + int ans = 0; + // Greedy Matching. Optionale Beschleunigung. 
+ for (int v = 0; v < l; v++) for (int u : adj[v]) + if (pairs[u] < 0) {pairs[u] = v; pairs[v] = u; ans++; break;} + for (int v = 0; v < l; v++) if (pairs[v] < 0) { + visited.assign(l, false); + ans += dfs(v); + } + return ans; // Größe des Matchings. +} diff --git a/content/graph/maxWeightBipartiteMatching.cpp b/content/graph/maxWeightBipartiteMatching.cpp new file mode 100644 index 0000000..a2b0a80 --- /dev/null +++ b/content/graph/maxWeightBipartiteMatching.cpp @@ -0,0 +1,50 @@ +double costs[N_LEFT][N_RIGHT]; + +// Es muss l<=r sein! (sonst Endlosschleife) +double match(int l, int r) { + vector lx(l), ly(r); + //xy is matching from l->r, yx from r->l, or -1 + vector xy(l, -1), yx(r, -1); + vector> slack(r); + + for (int x = 0; x < l; x++) + lx[x] = *max_element(costs[x], costs[x] + r); + for (int root = 0; root < l; root++) { + vector aug(r, -1); + vector s(l); + s[root] = true; + for (int y = 0; y < r; y++) { + slack[y] = {lx[root] + ly[y] - costs[root][y], root}; + } + int y = -1; + while (true) { + double delta = INF; + int x = -1; + for (int yy = 0; yy < r; yy++) { + if (aug[yy] < 0 && slack[yy].first < delta) { + tie(delta, x) = slack[yy]; + y = yy; + }} + if (delta > 0) { + for (int x = 0; x < l; x++) if (s[x]) lx[x] -= delta; + for (int y = 0; y < r; y++) { + if (aug[y] >= 0) ly[y] += delta; + else slack[y].first -= delta; + }} + aug[y] = x; + x = yx[y]; + if (x < 0) break; + s[x] = true; + for (int y = 0; y < r; y++) { + if (aug[y] < 0) { + double alt = lx[x] + ly[y] - costs[x][y]; + if (slack[y].first > alt) { + slack[y] = {alt, x}; + }}}} + while (y >= 0) { + yx[y] = aug[y]; + swap(y, xy[aug[y]]); + }} + return accumulate(all(lx), 0.0) + + accumulate(all(ly), 0.0); // Wert des Matchings +} diff --git a/content/graph/minCostMaxFlow.cpp b/content/graph/minCostMaxFlow.cpp new file mode 100644 index 0000000..14a222c --- /dev/null +++ b/content/graph/minCostMaxFlow.cpp @@ -0,0 +1,66 @@ +constexpr ll INF = 1LL << 60; // Größer als der maximale Fluss. 
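// Hypothetical usage sketch (n nodes, source s, sink t, edge u -> v):
//   MinCostFlow mcf(n, s, t);
//   mcf.addEdge(u, v, cap, cost);
//   mcf.mincostflow(); // afterwards mcf.maxflow and mcf.mincost hold the result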
+struct MinCostFlow { + struct edge { + int to; + ll f, cost; + }; + vector edges; + vector> adj; + vector pref, con; + vector dist; + const int s, t; + ll maxflow, mincost; + + MinCostFlow(int n, int source, int target) : + adj(n), s(source), t(target) {}; + + void addEdge(int u, int v, ll c, ll cost) { + adj[u].push_back(sz(edges)); + edges.push_back({v, c, cost}); + adj[v].push_back(sz(edges)); + edges.push_back({u, 0, -cost}); + } + + bool SPFA() { + pref.assign(sz(adj), -1); + dist.assign(sz(adj), INF); + vector inqueue(sz(adj)); + queue queue; + dist[s] = 0; + queue.push(s); + pref[s] = s; + inqueue[s] = true; + while (!queue.empty()) { + int cur = queue.front(); queue.pop(); + inqueue[cur] = false; + for (int id : adj[cur]) { + int to = edges[id].to; + if (edges[id].f > 0 && + dist[to] > dist[cur] + edges[id].cost) { + dist[to] = dist[cur] + edges[id].cost; + pref[to] = cur; + con[to] = id; + if (!inqueue[to]) { + inqueue[to] = true; + queue.push(to); + }}}} + return pref[t] != -1; + } + + void extend() { + ll w = INF; + for (int u = t; pref[u] != u; u = pref[u]) + w = min(w, edges[con[u]].f); + maxflow += w; + mincost += dist[t] * w; + for (int u = t; pref[u] != u; u = pref[u]) { + edges[con[u]].f -= w; + edges[con[u] ^ 1].f += w; + }} + + void mincostflow() { + con.assign(sz(adj), 0); + maxflow = mincost = 0; + while (SPFA()) extend(); + } +}; diff --git a/content/graph/pushRelabel.cpp b/content/graph/pushRelabel.cpp new file mode 100644 index 0000000..73a9eae --- /dev/null +++ b/content/graph/pushRelabel.cpp @@ -0,0 +1,64 @@ +struct Edge { + int to, rev; + ll f, c; +}; + +vector> adj; +vector> hs; +vector ec; +vector cur, H; + +void addEdge(int u, int v, ll c) { + adj[u].push_back({v, (int)sz(adj[v]), 0, c}); + adj[v].push_back({u, (int)sz(adj[u])-1, 0, 0}); +} + +void addFlow(Edge& e, ll f) { + if (ec[e.to] == 0 && f > 0) + hs[H[e.to]].push_back(e.to); + e.f += f; + adj[e.to][e.rev].f -= f; + ec[e.to] += f; + ec[adj[e.to][e.rev].to] -= f; +} + +ll maxFlow(int s, int t) { + int n = sz(adj); + hs.assign(2*n, {}); + ec.assign(n, 0); + cur.assign(n, 0); + H.assign(n, 0); + H[s] = n; + ec[t] = 1;//never set t to active... 
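  // hs[h] holds the active vertices at height h; co[h] (below) counts all
  // vertices at height h and drives the gap heuristic.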
+ vector co(2*n); + co[0] = n - 1; + for (Edge& e : adj[s]) addFlow(e, e.c); + for (int hi = 0;;) { + while (hs[hi].empty()) if (!hi--) return -ec[s]; + int v = hs[hi].back(); + hs[hi].pop_back(); + while (ec[v] > 0) { + if (cur[v] == sz(adj[v])) { + H[v] = 2*n; + for (int i = 0; i < sz(adj[v]); i++) { + Edge& e = adj[v][i]; + if (e.c - e.f > 0 && + H[v] > H[e.to] + 1) { + H[v] = H[e.to] + 1; + cur[v] = i; + }} + co[H[v]]++; + if (!--co[hi] && hi < n) { + for (int i = 0; i < n; i++) { + if (hi < H[i] && H[i] < n) { + co[H[i]]--; + H[i] = n + 1; + }}} + hi = H[v]; + } else { + Edge& e = adj[v][cur[v]]; + if (e.c - e.f > 0 && H[v] == H[e.to] + 1) { + addFlow(adj[v][cur[v]], min(ec[v], e.c - e.f)); + } else { + cur[v]++; +}}}}} diff --git a/content/graph/reroot.cpp b/content/graph/reroot.cpp new file mode 100644 index 0000000..4c6a748 --- /dev/null +++ b/content/graph/reroot.cpp @@ -0,0 +1,62 @@ +// Usual Tree DP can be broken down in 4 steps: +// - Initialize dp[v] = identity +// - Iterate over all children w and take a value for w +// by looking at dp[w] and possibly the edge label of v -> w +// - combine the values of those children +// usually this operation should be commutative and associative +// - finalize the dp[v] after iterating over all children +struct Reroot { + using T = ll; + + // identity element + T E() {} + // x: dp value of child + // e: index of edge going to child + T takeChild(T x, int e) {} + T comb(T x, T y) {} + // called after combining all dp values of children + T fin(T x, int v) {} + + vector>> g; + vector ord, pae; + vector dp; + + T dfs(int v) { + ord.push_back(v); + for (auto [w, e] : g[v]) { + g[w].erase(find(all(g[w]), pair(v, e^1))); + pae[w] = e^1; + dp[v] = comb(dp[v], takeChild(dfs(w), e)); + } + return dp[v] = fin(dp[v], v); + } + + vector solve(int n, vector> edges) { + g.resize(n); + for (int i = 0; i < n-1; i++) { + g[edges[i].first].emplace_back(edges[i].second, 2*i); + g[edges[i].second].emplace_back(edges[i].first, 2*i+1); + } + pae.assign(n, -1); + dp.assign(n, E()); + dfs(0); + vector updp(n, E()), res(n, E()); + for (int v : ord) { + vector pref(sz(g[v])+1), suff(sz(g[v])+1); + if (v != 0) pref[0] = takeChild(updp[v], pae[v]); + for (int i = 0; i < sz(g[v]); i++){ + auto [u, w] = g[v][i]; + pref[i+1] = suff[i] = takeChild(dp[u], w); + pref[i+1] = comb(pref[i], pref[i+1]); + } + for (int i = sz(g[v])-1; i >= 0; i--) { + suff[i] = comb(suff[i], suff[i+1]); + } + for (int i = 0; i < sz(g[v]); i++) { + updp[g[v][i].first] = fin(comb(pref[i], suff[i+1]), v); + } + res[v] = fin(pref.back(), v); + } + return res; + } +}; diff --git a/content/graph/scc.cpp b/content/graph/scc.cpp new file mode 100644 index 0000000..ac9a40b --- /dev/null +++ b/content/graph/scc.cpp @@ -0,0 +1,32 @@ +vector> adj, sccs; +int counter; +vector inStack; +vector low, idx, s; //idx enthält Index der SCC pro Knoten. 
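// After scc(): idx[v] is the index of v's component; Tarjan emits the
// components in reverse topological order, i.e. an edge u -> v between two
// different components implies idx[v] < idx[u] (2sat.cpp relies on this).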
+ +void visit(int v) { + int old = low[v] = counter++; + s.push_back(v); inStack[v] = true; + + for (auto u : adj[v]) { + if (low[u] < 0) visit(u); + if (inStack[u]) low[v] = min(low[v], low[u]); + } + + if (old == low[v]) { + sccs.push_back({}); + for (int u = -1; u != v;) { + u = s.back(); s.pop_back(); inStack[u] = false; + idx[u] = sz(sccs) - 1; + sccs.back().push_back(u); +}}} + +void scc() { + inStack.assign(sz(adj), false); + low.assign(sz(adj), -1); + idx.assign(sz(adj), -1); + sccs.clear(); + + counter = 0; + for (int i = 0; i < sz(adj); i++) { + if (low[i] < 0) visit(i); +}} diff --git a/content/graph/stoerWagner.cpp b/content/graph/stoerWagner.cpp new file mode 100644 index 0000000..97e667a --- /dev/null +++ b/content/graph/stoerWagner.cpp @@ -0,0 +1,53 @@ +struct Edge { + int from, to; + ll cap; +}; + +vector> adj, tmp; +vector erased; + +void merge(int u, int v) { + tmp[u].insert(tmp[u].end(), all(tmp[v])); + tmp[v].clear(); + erased[v] = true; + for (auto& vec : tmp) { + for (Edge& e : vec) { + if (e.from == v) e.from = u; + if (e.to == v) e.to = u; +}}} + +ll stoer_wagner() { + ll res = INF; + tmp = adj; + erased.assign(sz(tmp), false); + for (int i = 1; i < sz(tmp); i++) { + int s = 0; + while (erased[s]) s++; + priority_queue> pq; + pq.push({0, s}); + vector con(sz(tmp)); + ll cur = 0; + vector> state; + while (!pq.empty()) { + int c = pq.top().second; + pq.pop(); + if (con[c] < 0) continue; //already seen + con[c] = -1; + for (auto e : tmp[c]) { + if (con[e.to] >= 0) {//add edge to cut + con[e.to] += e.cap; + pq.push({con[e.to], e.to}); + cur += e.cap; + } else if (e.to != c) {//remove edge from cut + cur -= e.cap; + }} + state.push_back({cur, c}); + } + int t = state.back().second; + state.pop_back(); + if (state.empty()) return 0; //graph is not connected?! + merge(state.back().second, t); + res = min(res, state.back().first); + } + return res; +} diff --git a/content/graph/treeIsomorphism.cpp b/content/graph/treeIsomorphism.cpp new file mode 100644 index 0000000..355fefb --- /dev/null +++ b/content/graph/treeIsomorphism.cpp @@ -0,0 +1,15 @@ +vector> adj; +map, int> known; // dont reset! + +int treeLabel(int v, int from = -1) { + vector children; + for (int u : adj[v]) { + if (u == from) continue; + children.push_back(treeLabel(u, v)); + } + sort(all(children)); + if (known.find(children) == known.end()) { + known[children] = sz(known); + } + return known[children]; +} diff --git a/content/graph/virtualTree.cpp b/content/graph/virtualTree.cpp new file mode 100644 index 0000000..27d2d6c --- /dev/null +++ b/content/graph/virtualTree.cpp @@ -0,0 +1,22 @@ +// needs dfs in- and out- time and lca function +vector in, out; + +void virtualTree(vector ind) { // indices of used nodes + sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); + for (int i = 0, n = sz(ind); i < n - 1; i++) { + ind.push_back(lca(ind[i], ind[i + 1])); + } + sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); + ind.erase(unique(all(ind)), ind.end()); + + int n = ind.size(); + vector> tree(n); + vector st = {0}; + for (int i = 1; i < n; i++) { + while (in[ind[i]] >= out[ind[st.back()]]) st.pop_back(); + tree[st.back()].push_back(i); + st.push_back(i); + } + // virtual directed tree with n nodes, original indices in ind + // weights can be calculated, e.g. with binary lifting +} -- cgit v1.2.3