Diffstat (limited to 'graph')
35 files changed, 1380 insertions, 573 deletions
diff --git a/graph/2sat.cpp b/graph/2sat.cpp index ab38b02..2ebb11a 100644 --- a/graph/2sat.cpp +++ b/graph/2sat.cpp @@ -1,58 +1,28 @@ struct sat2 { - vector<vector<int>> adjlist, sccs; - vector<bool> visited, inStack; - int n, sccCounter, dfsCounter; - vector<int> d, low, idx, sol; - stack<int> s; + int n; // + scc variablen + vector<int> sol; - sat2(int vars) : n(vars*2) { adjlist.resize(n); }; + sat2(int vars) : n(vars*2), adjlist(vars*2) {}; - static int var(int i) { return i << 1; } + static int var(int i) {return i << 1;} void addImpl(int v1, int v2) { adjlist[v1].push_back(v2); adjlist[1^v2].push_back(1^v1); } - void addEquiv(int v1, int v2) { addImpl(v1, v2); addImpl(v2, v1); } - void addOr(int v1, int v2) { addImpl(1^v1, v2); } - void addXor(int v1, int v2) { addOr(v1, v2); addOr(1^v1, 1^v2); } - void addTrue(int v1) { addImpl(1^v1, v1); } - void addFalse(int v1) { addTrue(1^v1); } - void addAnd(int v1, int v2) { addTrue(v1); addTrue(v2); } - void addNand(int v1, int v2) { addOr(1^v1, 1^v2); } - - void dfs(int v) { - visited[v] = true; - d[v] = low[v] = dfsCounter++; - s.push(v); inStack[v] = true; - - for (auto w : adjlist[v]) { - if (!visited[w]) { - dfs(w); - low[v] = min(low[v], low[w]); - } else if (inStack[w]) low[v] = min(low[v], low[w]); - } - - if (d[v] == low[v]) { - sccs.push_back(vector<int>()); - int w; - do { - w = s.top(); s.pop(); inStack[w] = false; - idx[w] = sccCounter; - sccs[sccCounter].push_back(w); - } while (w != v); - sccCounter++; - }} + void addEquiv(int v1, int v2) {addImpl(v1, v2); addImpl(v2, v1);} + void addOr(int v1, int v2) {addImpl(1^v1, v2);} + void addXor(int v1, int v2) {addOr(v1, v2); addOr(1^v1, 1^v2);} + void addTrue(int v1) {addImpl(1^v1, v1);} + void addFalse(int v1) {addTrue(1^v1);} + void addAnd(int v1, int v2) {addTrue(v1); addTrue(v2);} + void addNand(int v1, int v2) {addOr(1^v1, 1^v2);} bool solvable() { - visited.assign(n, false); - inStack.assign(n, false); - d.assign(n, -1); - low.assign(n, -1); - idx.assign(n, -1); - sccCounter = dfsCounter = 0; - for (int i = 0; i < n; i++) if (!visited[i]) dfs(i); - for (int i = 0; i < n; i += 2) if (idx[i] == idx[i + 1]) return false; + scc(); //scc code von oben + for (int i = 0; i < n; i += 2) { + if (idx[i] == idx[i + 1]) return false; + } return true; } diff --git a/graph/LCA.cpp b/graph/LCA.cpp new file mode 100644 index 0000000..bdb8f12 --- /dev/null +++ b/graph/LCA.cpp @@ -0,0 +1,24 @@ +vector<vector<int>> adjlist(); +vector<int> visited(); +vector<int> first(); +vector<int> depth(); + +void initLCA(int gi, int d, int& c) { + visited[c] = gi, depth[c] = d, first[gi] = min(c, first[gi]), c++; + for(int gn : adjlist[gi]) { + initLCA(gn, d+1, c); + visited[c] = gi, depth[c] = d, c++; +}} + +int getLCA(int a, int b) { + return visited[query(min(first[a], first[b]), max(first[a], first[b]))]; +} + +void exampleUse() { + int c = 0; + visited = vector<int>(2*adjlist.size()); + first = vector<int>(adjlist.size(), 2*adjlist.size()); + depth = vector<int>(2*adjlist.size()); + initLCA(0, 0, c); + init(depth); +} diff --git a/graph/LCA_sparse.cpp b/graph/LCA_sparse.cpp new file mode 100644 index 0000000..2a38528 --- /dev/null +++ b/graph/LCA_sparse.cpp @@ -0,0 +1,32 @@ +struct LCA { + vector<ll> depth; + vector<int> visited, first; + int idx; + SparseTable st; //sparse table von oben + + void init(vector<vector<int>>& g, int root) { + depth.assign(2 * sz(g), 0); + visited.assign(2 * sz(g), -1); + first.assign(sz(g), 2 * sz(g)); + idx = 0; + visit(g, root); + st.init(&depth); + } + + void 
visit(vector<vector<int>>& g, int v, ll d=0, int p=-1) { + visited[idx] = v, depth[idx] = d; + first[v] = min(idx, first[v]), idx++; + + for (int w : g[v]) { + if (first[w] == 2 * sz(g)) { + visit(g, w, d + 1, v); + visited[idx] = v, depth[idx] = d, idx++; + }}} + + int getLCA(int a, int b) { + if (first[a] > first[b]) swap(a, b); + return visited[st.queryIdempotent(first[a], first[b] + 1)]; + } + + ll getDepth(int a) {return depth[first[a]];} +}; diff --git a/graph/TSP.cpp b/graph/TSP.cpp index 7fba361..0f72766 100644 --- a/graph/TSP.cpp +++ b/graph/TSP.cpp @@ -1,23 +1,28 @@ -// Laufzeit: O(n^2*2^n) -vector<vector<int>> dist; // Entfernung zwischen je zwei Punkten. -vector<int> TSP() { - int n = dist.size(), m = 1 << n; - vector<vector<ii>> dp(n, vector<ii>(m, ii(INF, -1))); +vector<vector<ll>> dist; // Entfernung zwischen je zwei Punkten. + +void TSP() { + int n = sz(dist), m = 1 << n; + vector<vector<edge>> dp(n, vector<edge>(m, edge{INF, -1})); - for(int c = 0; c < n; c++) dp[c][m-1].first = dist[c][0], dp[c][m-1].second = 0; + for (int c = 0; c < n; c++) + dp[c][m-1].dist = dist[c][0], dp[c][m-1].to = 0; - for(int v = m - 2; v >= 0; v--) { - for(int c = n - 1; c >= 0; c--) { - for(int g = 0; g < n; g++) { - if(g != c && !((1 << g) & v)) { - if((dp[g][(v | (1 << g))].first + dist[c][g]) < dp[c][v].first) { - dp[c][v].first = dp[g][(v | (1 << g))].first + dist[c][g]; - dp[c][v].second = g; + for (int v = m - 2; v >= 0; v--) { + for (int c = n - 1; c >= 0; c--) { + for (int g = 0; g < n; g++) { + if (g != c && !((1 << g) & v)) { + if ((dp[g][(v | (1 << g))].dist + dist[c][g]) < + dp[c][v].dist) { + dp[c][v].dist = + dp[g][(v | (1 << g))].dist + dist[c][g]; + dp[c][v].to = g; }}}}} + // return dp[0][1]; // Länge der Tour - vector<int> res; res.push_back(0); int v = 0; - while(res.back() != 0 || res.size() == 1) { - res.push_back(dp[res.back()][(v |= (1 << res.back()))].second); - } - return res; // Enthält Knoten 0 zweimal. An erster und letzter Position. + vector<int> tour; tour.push_back(0); int v = 0; + while (tour.back() != 0 || sz(tour) == 1) + tour.push_back(dp[tour.back()] + [(v |= (1 << tour.back()))].to); + // Enthält Knoten 0 zweimal. An erster und letzter Position. + // return tour; } diff --git a/graph/articulationPoints.cpp b/graph/articulationPoints.cpp index fba08bb..e173355 100644 --- a/graph/articulationPoints.cpp +++ b/graph/articulationPoints.cpp @@ -1,33 +1,45 @@ -// Laufzeit: O(|V|+|E|) -vector<vector<int>> adjlist; +vector<vector<edge>> adjlist; +vector<int> num; +int counter, rootCount, root; vector<bool> isArt; -vector<int> d, low; -int counter, root, rootCount; // rootCount >= 2 <=> root Artikulationspunkt -vector<ii> bridges; // Nur fuer Brücken.
+vector<edge> bridges, st; +vector<vector<edge>> bcc; -void dfs(int v, int parent = -1) { - d[v] = low[v] = ++counter; - if (parent == root) ++rootCount; - - for (auto w : adjlist[v]) { - if (!d[w]) { - dfs(w, v); - if (low[w] >= d[v] && v != root) isArt[v] = true; - if (low[w] > d[v]) bridges.push_back(ii(v, w)); - low[v] = min(low[v], low[w]); - } else if (w != parent) { - low[v] = min(low[v], d[w]); -}}} +int dfs(int v, int parent = -1) { + int me = num[v] = ++counter, top = me; + for (edge& e : adjlist[v]) { + if (e.id == parent){} + else if (num[e.to]) { + top = min(top, num[e.to]); + if (num[e.to] < me) st.push_back(e); + } else { + if (v == root) rootCount++; + int si = sz(st); + int up = dfs(e.to, e.id); + top = min(top, up); + if (up >= me) isArt[v] = true; + if (up > me) bridges.push_back(e); + if (up <= me) st.push_back(e); + if (up == me) { + bcc.emplace_back(); + while (sz(st) > si) { + bcc.back().push_back(st.back()); + st.pop_back(); + }}}} + return top; +} void findArticulationPoints() { counter = 0; - low.resize(adjlist.size()); - d.assign(adjlist.size(), 0); - isArt.assign(adjlist.size(), false); - bridges.clear(); //nur fuer Bruecken - for (int v = 0; v < (int)adjlist.size(); v++) { - if (!d[v]) { - root = v; rootCount = 0; + num.assign(sz(adjlist), 0); + isArt.assign(sz(adjlist), false); + bridges.clear(); + st.clear(); + bcc.clear(); + for (int v = 0; v < sz(adjlist); v++) { + if (!num[v]) { + root = v; + rootCount = 0; dfs(v); - if (rootCount > 1) isArt[v] = true; -}}} + isArt[v] = rootCount > 1; +}}}
\ No newline at end of file diff --git a/graph/bellmannFord.cpp b/graph/bellmannFord.cpp index d3d6094..21c7dbe 100644 --- a/graph/bellmannFord.cpp +++ b/graph/bellmannFord.cpp @@ -1,20 +1,19 @@ -// Laufzeit: O(|V|*|E|) -vector<edge> edges; // Kanten einfügen! -vector<int> dist, parent; - -void bellmannFord() { - dist.assign(NUM_VERTICES, INF); dist[0] = 0; - parent.assign(NUM_VERTICES, -1); - for (int i = 0; i < NUM_VERTICES - 1; i++) { - for (auto &e : edges) { - if (dist[e.from] + e.cost < dist[e.to]) { +void bellmannFord(int n, vector<edge> edges, int start) { + vector<ll> dist(n, INF), parent(n, -1); + dist[start] = 0; + + for (int i = 1; i < n; i++) { + for (edge& e : edges) { + if (dist[e.from] != INF && + dist[e.from] + e.cost < dist[e.to]) { dist[e.to] = dist[e.from] + e.cost; parent[e.to] = e.from; }}} - // "dist" und "parent" sind korrekte kürzeste Pfade. - // Folgende Zeilen prüfen nur negative Kreise. - for (auto &e : edges) { - if (dist[e.from] + e.cost < dist[e.to]) { + for (edge& e : edges) { + if (dist[e.from] != INF && + dist[e.from] + e.cost < dist[e.to]) { // Negativer Kreis gefunden. -}}} + }} + //return dist, parent; +} diff --git a/graph/bitonicTSP.cpp b/graph/bitonicTSP.cpp index 2a8b5f9..767bc5b 100644 --- a/graph/bitonicTSP.cpp +++ b/graph/bitonicTSP.cpp @@ -1,24 +1,31 @@ -// Laufzeit: O(n^2) -vector<vector<double>> dp, dist; // Entfernungen zwischen Punkten. +vector<vector<double>> dist; // Initialisiere mit Entfernungen zwischen Punkten. -double get(int p1, int p2) { - int v = max(p1, p2) + 1; - if (v == dist.size()) return dist[p1][v - 1] + dist[p2][v - 1]; - if (dp[p1][p2] >= 0.0) return dp[p1][p2]; - double tryLR = dist[p1][v] + get(v, p2); - double tryRL = dist[p2][v] + get(p1, v); - return dp[p1][p2] = min(tryLR, tryRL); -} +void bitonicTSP() { + vector<double> dp(sz(dist), HUGE_VAL); + vector<int> pre(sz(dist)); // nur für Tour + dp[0] = 0; dp[1] = 2 * dist[0][1]; pre[1] = 0; + for (unsigned int i = 2; i < sz(dist); i++) { + double link = 0; + for (int j = i - 2; j >= 0; j--) { + link += dist[j + 1][j + 2]; + double opt = link + dist[j][i] + dp[j + 1] - dist[j][j + 1]; + if (opt < dp[i]) { + dp[i] = opt; + pre[i] = j; + }}} + // return dp.back(); // Länger der Tour -void bitonicTour() { - dp.assign(dist.size(), vector<double>(dist.size(), -1)); - get(0, 0); // return dp[0][0]; // Länger der Tour - vector<int> lr = {0}, rl = {0}; - for (int p1 = 0, p2 = 0, v; (v = max(p1, p2) + 1) < dist.size();) { - if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) { - lr.push_back(v); p1 = v; - } else { - rl.push_back(v); p2 = v; - }} - lr.insert(lr.end(), rl.rbegin(), rl.rend()); // Tour, Knoten 0 doppelt. -} + int j, n = sz(dist) - 1; + vector<int> ut, lt = {n, n - 1}; + do { + j = pre[n]; + (lt.back() == n ? lt : ut).push_back(j); + for (int i = n - 1; i > j + 1; i--) { + (lt.back() == i ? lt : ut).push_back(i - 1); + } + } while(n = j + 1, j > 0); + (lt.back() == 1 ? lt : ut).push_back(0); + reverse(lt.begin(), lt.end()); + lt.insert(lt.end(), ut.begin(), ut.end()); + //return lt;// Enthält Knoten 0 zweimal. An erster und letzter Position. +}
\ No newline at end of file diff --git a/graph/bitonicTSPsimple.cpp b/graph/bitonicTSPsimple.cpp new file mode 100644 index 0000000..ff605d9 --- /dev/null +++ b/graph/bitonicTSPsimple.cpp @@ -0,0 +1,28 @@ +vector<vector<double>> dist; // Entfernungen zwischen Punkten. +vector<vector<double>> dp; + +double get(int p1, int p2) { + int v = max(p1, p2) + 1; + if (v == sz(dist)) return dist[p1][v - 1] + dist[p2][v - 1]; + if (dp[p1][p2] >= 0.0) return dp[p1][p2]; + double tryLR = dist[p1][v] + get(v, p2); + double tryRL = dist[p2][v] + get(p1, v); + return dp[p1][p2] = min(tryLR, tryRL); +} + +void bitonicTour() { + dp = vector<vector<double>>(sz(dist), + vector<double>(sz(dist), -1)); + get(0, 0); + // return dp[0][0]; // Länger der Tour + vector<int> lr = {0}, rl = {0}; + for (int p1 = 0, p2 = 0, v; (v = max(p1, p2)+1) < sz(dist);) { + if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) { + lr.push_back(v); p1 = v; + } else { + rl.push_back(v); p2 = v; + }} + lr.insert(lr.end(), rl.rbegin(), rl.rend()); + // Enthält Knoten 0 zweimal. An erster und letzter Position. + // return lr; +}
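// Usage sketch (assumption, not from the repository): both bitonic-TSP
// variants above expect dist to be built from points already sorted by
// x-coordinate. The names pts/exampleBitonicTour are illustrative only.
void exampleBitonicTour() {
    vector<pair<double, double>> pts = {{0, 0}, {1, 2}, {2, -1}, {3, 1}};
    sort(pts.begin(), pts.end()); // nach x sortieren (Ties nach y)
    dist.assign(sz(pts), vector<double>(sz(pts)));
    for (int i = 0; i < sz(pts); i++)
        for (int j = 0; j < sz(pts); j++)
            dist[i][j] = hypot(pts[i].first - pts[j].first,
                               pts[i].second - pts[j].second);
    bitonicTour(); // danach enthält dp[0][0] die Länge der Tour
}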
\ No newline at end of file diff --git a/graph/blossom.cpp b/graph/blossom.cpp new file mode 100644 index 0000000..cc19a2b --- /dev/null +++ b/graph/blossom.cpp @@ -0,0 +1,84 @@ +struct GM { + vector<vector<int>> adjlist; + // pairs ist der gematchte knoten oder n + vector<int> pairs, first, que; + vector<pair<int, int>> label; + int head, tail; + + GM(int n) : adjlist(n), pairs(n + 1, n), first(n + 1, n), + que(n), label(n + 1, {-1, -1}) {} + + void rematch(int v, int w) { + int t = pairs[v]; pairs[v] = w; + if (pairs[t] != v) return; + if (label[v].second == -1) { + pairs[t] = label[v].first; + rematch(pairs[t], t); + } else { + int x = label[v].first; + int y = label[v].second; + rematch(x, y); + rematch(y, x); + }} + + int findFirst(int u) { + return label[first[u]].first < 0 ? first[u] + : first[u] = findFirst(first[u]); + } + + void relabel(int x, int y) { + int r = findFirst(x); + int s = findFirst(y); + if (r == s) return; + auto h = label[r] = label[s] = {~x, y}; + int join; + while (true) { + if (s != sz(adjlist)) swap(r, s); + r = findFirst(label[pairs[r]].first); + if (label[r] == h) { + join = r; + break; + } else { + label[r] = h; + }} + for (int v : {first[x], first[y]}) { + for (; v != join; v = first[label[pairs[v]].first]) { + label[v] = {x, y}; + first[v] = join; + que[tail++] = v; + }}} + + bool augment(int u) { + label[u] = {sz(adjlist), -1}; + first[u] = sz(adjlist); + head = tail = 0; + for (que[tail++] = u; head < tail;) { + int x = que[head++]; + for (int y : adjlist[x]) { + if (pairs[y] == sz(adjlist) && y != u) { + pairs[y] = x; + rematch(x, y); + return true; + } else if (label[y].first >= 0) { + relabel(x, y); + } else if (label[pairs[y]].first == -1) { + label[pairs[y]].first = x; + first[pairs[y]] = y; + que[tail++] = pairs[y]; + }}} + return false; + } + + int match() { + int matching = head = tail = 0; + for (int u = 0; u < sz(adjlist); u++) { + if (pairs[u] < sz(adjlist) || !augment(u)) continue; + matching++; + for (int i = 0; i < tail; i++) + label[que[i]] = label[pairs[que[i]]] = {-1, -1}; + label[sz(adjlist)] = {-1, -1}; + } + return matching; + } + +};
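// Usage sketch for the GM struct above (assumption: the caller fills
// GM::adjlist symmetrically, since the graph is undirected; the helper
// name exampleGeneralMatching is illustrative only).
int exampleGeneralMatching() {
    GM gm(4);
    auto addEdge = [&](int a, int b) {
        gm.adjlist[a].push_back(b);
        gm.adjlist[b].push_back(a);
    };
    addEdge(0, 1); addEdge(1, 2); addEdge(2, 3);
    int matchingSize = gm.match(); // erwartet 2 auf diesem Pfadgraphen
    // gm.pairs[v] ist der gematchte Partner von v, oder n falls ungematcht.
    return matchingSize;
}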
\ No newline at end of file diff --git a/graph/bronKerbosch.cpp b/graph/bronKerbosch.cpp new file mode 100644 index 0000000..caf2421 --- /dev/null +++ b/graph/bronKerbosch.cpp @@ -0,0 +1,24 @@ +using bits = bitset<64>; +vector<bits> adj, cliques; + +void addEdge(int a, int b) { + if (a != b) adj[a][b] = adj[b][a] = 1; +} + +void bronKerboschRec(bits R, bits P, bits X) { + if (!P.any() && !X.any()) { + cliques.push_back(R); + } else { + int q = (P | X)._Find_first(); + bits cands = P & ~adj[q]; + for (int i = 0; i < sz(adj); i++) if (cands[i]){ + R[i] = 1; + bronKerboschRec(P & adj[i], X & adj[i], R); + R[i] = P[i] = 0; + X[i] = 1; +}}} + +void bronKerbosch() { + cliques.clear(); + bronKerboschRec({}, {(1ull << sz(adj)) - 1}, {}); +} diff --git a/graph/capacityScaling.cpp b/graph/capacityScaling.cpp index 8571e2b..b59c322 100644 --- a/graph/capacityScaling.cpp +++ b/graph/capacityScaling.cpp @@ -1,35 +1,44 @@ -// Ford Fulkerson mit Capacity Scaling. Laufzeit: O(|E|^2*log(C)) -static const int MAX_N = 500; // #Knoten, egal für die Laufzeit. -struct edge { int dest, rev; ll cap, flow; }; -vector<edge> adjlist[MAX_N]; -int visited[MAX_N], target, dfsCounter; +struct edge { + int from, to; + ll f, c; +}; + +vector<edge> edges; +vector<vector<int>> adjlist; +int s, t, dfsCounter; +vector<int> visited; ll capacity; -bool dfs(int x) { - if (x == target) return 1; - if (visited[x] == dfsCounter) return 0; - visited[x] = dfsCounter; - for (edge &e : adjlist[x]) { - if (e.cap >= capacity && dfs(e.dest)) { - e.cap -= capacity; adjlist[e.dest][e.rev].cap += capacity; - e.flow += capacity; adjlist[e.dest][e.rev].flow -= capacity; - return 1; - }} - return 0; +void addEdge(int from, int to, ll c) { + adjlist[from].push_back(edges.size()); + edges.push_back({from, to, 0, c}); + adjlist[to].push_back(edges.size()); + edges.push_back({to, from, 0, 0}); } -void addEdge(int u, int v, ll c) { - adjlist[u].push_back(edge {v, (int)adjlist[v].size(), c, 0}); - adjlist[v].push_back(edge {u, (int)adjlist[u].size() - 1, 0, 0}); +bool dfs(int x) { + if (x == t) return true; + if (visited[x] == dfsCounter) return false; + visited[x] = dfsCounter; + for (int id : adjlist[x]) { + if (edges[id].c >= capacity && dfs(edges[id].to)) { + edges[id].c -= capacity; edges[id ^ 1].c += capacity; + edges[id].f += capacity; edges[id ^ 1].f -= capacity; + return true; + }} + return false; } -ll maxFlow(int s, int t) { - capacity = 1L << 62; - target = t; - ll flow = 0L; - while (capacity) { - while (dfsCounter++, dfs(s)) flow += capacity; - capacity /= 2; - } - return flow; +ll maxFlow(int source, int target) { + capacity = 1ll << 62; + s = source; + t = target; + ll flow = 0; + visited.assign(adjlist.size(), 0); + dfsCounter = 0; + while (capacity) { + while (dfsCounter++, dfs(s)) flow += capacity; + capacity /= 2; + } + return flow; } diff --git a/graph/centroid.cpp b/graph/centroid.cpp new file mode 100644 index 0000000..d2855e2 --- /dev/null +++ b/graph/centroid.cpp @@ -0,0 +1,22 @@ +vector<int> s; +void dfs1(int u, int v = -1) { + s[u] = 1; + for (int w : adj[u]) { + if (w == v) continue; + dfs1(w, u); + s[u] += s[w]; +}} + +pair<int, int> dfs2(int u, int v, int n) { + for (int w : adj[u]) { + if (2 * s[w] == n) return {u, w}; + if (w != v && 2 * s[w] > n) return dfs2(w, u, n); + } + return {u, -1}; +} + +pair<int, int> find_centroid(int root) { + // s muss nicht initialisiert werden, nur groß genug sein + dfs1(root); + return dfs2(root, -1, s[root]); +} diff --git a/graph/connect.cpp b/graph/connect.cpp new file mode 100644 
index 0000000..28a6f6c --- /dev/null +++ b/graph/connect.cpp @@ -0,0 +1,31 @@ +struct connect { + int n; + vector<pair<int, int>> edges; + LCT lct; // min LCT no updates required + + connect(int n, int m) : n(n), edges(m), lct(n+m) {} + + bool connected(int a, int b) { + return lct.connected(&lct.nodes[a], &lct.nodes[b]); + } + + void addEdge(int a, int b, int id) { + lct.nodes[id + n] = LCT::Node(id + n, id + n); + edges[id] = {a, b}; + if (connected(a, b)) { + int old = lct.query(&lct.nodes[a], &lct.nodes[b]); + if (old < id) eraseEdge(old); + } + if (!connected(a, b)) { + lct.link(&lct.nodes[a], &lct.nodes[id + n]); + lct.link(&lct.nodes[b], &lct.nodes[id + n]); + }} + + void eraseEdge(ll id) { + if (connected(edges[id].first, edges[id].second) && + lct.query(&lct.nodes[edges[id].first], + &lct.nodes[edges[id].second]) == id) { + lct.cut(&lct.nodes[edges[id].first], &lct.nodes[id + n]); + lct.cut(&lct.nodes[edges[id].second], &lct.nodes[id + n]); + }} +};
\ No newline at end of file diff --git a/graph/cycleCounting.cpp b/graph/cycleCounting.cpp new file mode 100644 index 0000000..800f27e --- /dev/null +++ b/graph/cycleCounting.cpp @@ -0,0 +1,65 @@ +constexpr ll maxEdges = 128; +using cycle = bitset<maxEdges>; +struct cycles { + ll n; + vector<vector<pair<ll, ll>>> adj; + vector<bool> seen; + vector<cycle> paths, base; + vector<pair<ll, ll>> edges; + + cycles(ll n) : n(n), adj(n), seen(n), paths(n) {} + + void addEdge(ll a, ll b) { + adj[a].push_back({b, sz(edges)}); + adj[b].push_back({a, sz(edges)}); + edges.push_back({a, b}); + } + + void addBase(cycle cur) { + for (cycle o : base) { + o ^= cur; + if (o._Find_first() > cur._Find_first()) cur = o; + } + if (cur.any()) base.push_back(cur); + } + + void findBase(ll c = 0, ll p = -1, cycle cur = {}) { + if (n == 0) return; + if (seen[c]) { + addBase(cur ^ paths[c]); + } else { + seen[c] = true; + paths[c] = cur; + for (auto e : adj[c]) { + if (e.first == p) continue; + cur[e.second].flip(); + findBase(e.first, c, cur); + cur[e.second].flip(); + }}} + + // cycle must be constructed from base + bool isCycle(cycle cur) { + if (cur.none()) return false; + init(n); + for (ll i = 0; i < sz(edges); i++) { + if (cur[i]) { + cur[i] = false; + if (findSet(edges[i].first) == + findSet(edges[i].second)) break; + unionSets(edges[i].first, edges[i].second); + }} + return cur.none(); + }; + + ll count() { + findBase(); + ll res = 0; + for (ll i = 1; i < (1ll << sz(base)); i++) { + cycle cur; + for (ll j = 0; j < sz(base); j++) { + if (((i >> j) & 1) != 0) cur ^= base[j]; + } + if (isCycle(cur)) res++; + } + return res; + } +}; diff --git a/graph/dfs.tex b/graph/dfs.tex new file mode 100644 index 0000000..1e6705f --- /dev/null +++ b/graph/dfs.tex @@ -0,0 +1,16 @@ +\begin{expandtable} +\begin{tabularx}{\linewidth}{|X|XIXIX|} + \hline + Kantentyp $(v, w)$ & \code{dfs[v] < dfs[w]} & \code{fin[v] > fin[w]} & \code{seen[w]} \\ + %$(v, w)$ & \code{dfs[w]} & \code{fin[w]} & \\ + \hline + in-tree & \code{true} & \code{true} & \code{false} \\ + \grayhline + forward & \code{true} & \code{true} & \code{true} \\ + \grayhline + backward & \code{false} & \code{false} & \code{true} \\ + \grayhline + cross & \code{false} & \code{true} & \code{true} \\ + \hline +\end{tabularx} +\end{expandtable} diff --git a/graph/dijkstra.cpp b/graph/dijkstra.cpp index 52cb57e..0b821dd 100644 --- a/graph/dijkstra.cpp +++ b/graph/dijkstra.cpp @@ -1,17 +1,21 @@ -// Laufzeit: O((|E|+|V|)*log |V|) -void dijkstra(int start) { - priority_queue<ii, vector<ii>, greater<ii> > pq; - vector<int> dist(NUM_VERTICES, INF), parent(NUM_VERTICES, -1); - dist[start] = 0; pq.push(ii(0, start)); +using path = pair<ll, int>; //dist, destination + +void dijkstra(const vector<vector<path>> &adjlist, int start) { + priority_queue<path, vector<path>, greater<path>> pq; + vector<ll> dist(sz(adjlist), INF); + vector<int> prev(sz(adjlist), -1); + dist[start] = 0; pq.emplace(0, start); while (!pq.empty()) { - ii front = pq.top(); pq.pop(); - int curNode = front.second, curDist = front.first; - if (curDist > dist[curNode]) continue; // WICHTIG! - - for (auto n : adjlist[curNode]) { - int nextNode = n.first, nextDist = curDist + n.second; - if (nextDist < dist[nextNode]) { - dist[nextNode] = nextDist; parent[nextNode] = curNode; - pq.push(ii(nextDist, nextNode)); -}}}} + path front = pq.top(); pq.pop(); + if (front.first > dist[front.second]) continue; // WICHTIG!
+ + for (path e : adjlist[front.second]) { + ll newDist = front.first + e.first; + if (newDist < dist[e.second]) { + dist[e.second] = newDist; + prev[e.second] = front.second; + pq.emplace(newDist, e.second); + }}} + //return dist, prev; +} diff --git a/graph/dinicScaling.cpp b/graph/dinicScaling.cpp index ebbcc27..1b58d29 100644 --- a/graph/dinicScaling.cpp +++ b/graph/dinicScaling.cpp @@ -1,61 +1,63 @@ -// Laufzeit: O(|V|^2*|E|) -// Knoten müssen von 0 nummeriert sein. -const int INF = 0x3FFFFFFF, MAXN = 500; -struct edge { int a, b; ll f, c; }; -int n, m, pt[MAXN], d[MAXN], s, t; -vector<edge> e; -vector<int> g[MAXN]; -ll flow = 0, lim; +struct edge { + int from, to; + ll f, c; +}; + +vector<edge> edges; +vector<vector<int>> adjlist; +int s, t; +vector<int> pt, dist; +ll flow, lim; queue<int> q; -void addEdge(int a, int b, ll c) { - g[a].push_back(e.size()); - e.push_back(edge {a, b, 0, c}); - g[b].push_back(e.size()); - e.push_back(edge {b, a, 0, 0}); +void addEdge(int from, int to, ll c) { + adjlist[from].push_back(sz(edges)); + edges.push_back({from, to, 0, c}); + adjlist[to].push_back(sz(edges)); + edges.push_back({to, from, 0, 0}); } bool bfs() { - for (int i = 0; i < n; i++) d[i] = INF; - d[s] = 0; - q.push(s); - while (!q.empty() && d[t] == INF) { - int cur = q.front(); q.pop(); - for (int i = 0; i < (int)g[cur].size(); i++) { - int id = g[cur][i], to = e[id].b; - if (d[to] == INF && e[id].c - e[id].f >= lim) { - d[to] = d[cur] + 1; - q.push(to); - } - } - } - while (!q.empty()) q.pop(); - return d[t] != INF; + dist.assign(sz(dist), -1); + dist[t] = sz(adjlist) + 1; + q.push(t); + while (!q.empty() && dist[s] < 0) { + int cur = q.front(); q.pop(); + for (int id : adjlist[cur]) { + int to = edges[id].to; + if (dist[to] < 0 && + edges[id ^ 1].c - edges[id ^ 1].f >= lim) { + dist[to] = dist[cur] - 1; + q.push(to); + }}} + while (!q.empty()) q.pop(); + return dist[s] >= 0; } bool dfs(int v, ll flow) { - if (flow == 0) return false; - if (v == t) return true; - for (; pt[v] < (int)g[v].size(); pt[v]++) { - int id = g[v][pt[v]], to = e[id].b; - if (d[to] == d[v] + 1 && e[id].c - e[id].f >= flow) { - int pushed = dfs(to, flow); - if (pushed) { - e[id].f += flow; - e[id ^ 1].f -= flow; - return true; - } - } - } - return false; + if (flow == 0) return false; + if (v == t) return true; + for (; pt[v] < sz(adjlist[v]); pt[v]++) { + int id = adjlist[v][pt[v]], to = edges[id].to; + if (dist[to] == dist[v] + 1 && + edges[id].c - edges[id].f >= flow) { + if (dfs(to, flow)) { + edges[id].f += flow; + edges[id ^ 1].f -= flow; + return true; + }}} + return false; } -// Nicht vergessen, s und t zu setzen! -void dinic() { - for (lim = (1LL << 62); lim >= 1;) { - if (!bfs()) { lim /= 2; continue; } - for (int i = 0; i < n; i++) pt[i] = 0; - int pushed; - while ((pushed = dfs(s, lim))) flow += lim; - } +ll maxFlow(int source, int target) { + s = source; + t = target; + flow = 0; + dist.resize(sz(adjlist)); + for (lim = (1LL << 62); lim >= 1;) { + if (!bfs()) {lim /= 2; continue;} + pt.assign(sz(adjlist), 0); + while (dfs(s, lim)) flow += lim; + } + return flow; } diff --git a/graph/euler.cpp b/graph/euler.cpp index a35ce13..0907ab2 100644 --- a/graph/euler.cpp +++ b/graph/euler.cpp @@ -1,40 +1,26 @@ -// Laufzeit: O(|V|+|E|) -vector< vector<int> > adjlist, otherIdx; -vector<int> cycle, validIdx; +vector<vector<int>> idx; +vector<int> to, validIdx, cycle; +vector<bool> used; -// Vertauscht Kanten mit Indizes a und b von Knoten n. 
-void swapEdges(int n, int a, int b) { - int neighA = adjlist[n][a], neighB = adjlist[n][b]; - int idxNeighA = otherIdx[n][a], idxNeighB = otherIdx[n][b]; - swap(adjlist[n][a], adjlist[n][b]); - swap(otherIdx[n][a], otherIdx[n][b]); - otherIdx[neighA][idxNeighA] = b; - otherIdx[neighB][idxNeighB] = a; -} - -// Entfernt Kante i von Knoten n (und die zugehörige Rückwärtskante). -void removeEdge(int n, int i) { - int other = adjlist[n][i]; - if (other == n) { //Schlingen. - validIdx[n]++; - return; - } - int otherIndex = otherIdx[n][i]; - validIdx[n]++; - if (otherIndex != validIdx[other]) { - swapEdges(other, otherIndex, validIdx[other]); - } - validIdx[other]++; +void addEdge(int a, int b) { + idx[a].push_back(sz(to)); + to.push_back(b); + used.push_back(false); + idx[b].push_back(sz(to)); //für ungerichtet + to.push_back(a); + used.push_back(false); } // Findet Eulerzyklus an Knoten n startend. -// Teste vorher, dass Graph zusammenhängend ist! Isolierten Knoten? -// Teste vorher, ob Eulerzyklus überhaupt existiert! +// init idx und validIdx void euler(int n) { - while (validIdx[n] < (int)adjlist[n].size()) { - int nn = adjlist[n][validIdx[n]]; - removeEdge(n, validIdx[n]); - euler(nn); - } - cycle.push_back(n); // Zyklus in cycle in umgekehrter Reihenfolge. + for (;validIdx[n] < sz(idx[n]); validIdx[n]++) { + if (!used[idx[n][validIdx[n]]]) { + int nn = to[idx[n][validIdx[n]]]; + used[idx[n][validIdx[n]]] = true; + used[idx[n][validIdx[n]] ^ 1] = true; //für ungerichtet + euler(nn); + }} + // Zyklus in cycle in umgekehrter Reihenfolge. + cycle.push_back(n); } diff --git a/graph/floydWarshall.cpp b/graph/floydWarshall.cpp index e9cd526..fb6263e 100644 --- a/graph/floydWarshall.cpp +++ b/graph/floydWarshall.cpp @@ -1,9 +1,26 @@ -// Initialisiere mat: mat[i][i] = 0, mat[i][j] = INF falls i & j nicht -// adjazent, Länge sonst. Laufzeit: O(|V|^3) +vector<vector<ll>> dist; // Entfernung zwischen je zwei Punkten. +vector<vector<int>> pre; + void floydWarshall() { - for (k = 0; k < MAX_V; k++) { - for (i = 0; i < MAX_V; i++) { - for (j = 0; j < MAX_V; j++) { - if (mat[i][k] != INF && mat[k][j] != INF && mat[i][k] + mat[k][j] < mat[i][j]) { - mat[i][j] = mat[i][k] + mat[k][j]; + pre.assign(sz(dist), vector<int>(sz(dist), -1)); + for (int i = 0; i < sz(dist); i++) { + for (int j = 0; j < sz(dist); j++) { + if (dist[i][j] < INF) { + pre[i][j] = j; + }}} + + for (int k = 0; k < sz(dist); k++) { + for (int i = 0; i < sz(dist); i++) { + for (int j = 0; j < sz(dist); j++) { + if (dist[i][j] > dist[i][k] + dist[k][j]) { + dist[i][j] = dist[i][k] + dist[k][j]; + pre[i][j] = pre[i][k]; }}}}} + +vector<int> getPath(int u, int v) { + //return dist[u][v]; // Pfadlänge u -> v + if (pre[u][v] < 0) return {}; + vector<int> path = {v}; + while (u != v) path.push_back(u = pre[u][v]); + return path; //Pfad u -> v +} diff --git a/graph/graph.tex b/graph/graph.tex index 37356f6..a26661d 100644 --- a/graph/graph.tex +++ b/graph/graph.tex @@ -1,90 +1,192 @@ \section{Graphen} -% \subsection{Minimale Spannbäume} +\begin{algorithm}{DFS} + \input{graph/dfs} +\end{algorithm} -% \paragraph{Schnitteigenschaft} -% Für jeden Schnitt $C$ im Graphen gilt: -% Gibt es eine Kante $e$, die echt leichter ist als alle anderen Schnittkanten, so gehört diese zu allen minimalen Spannbäumen. -% ($\Rightarrow$ Die leichteste Kante in einem Schnitt kann in einem minimalen Spannbaum verwendet werden.) 
+\begin{algorithm}{Minimale Spannbäume} + \paragraph{Schnitteigenschaft} + Für jeden Schnitt $C$ im Graphen gilt: + Gibt es eine Kante $e$, die echt leichter ist als alle anderen Schnittkanten, so gehört diese zu allen minimalen Spannbäumen. + ($\Rightarrow$ Die leichteste Kante in einem Schnitt kann in einem minimalen Spannbaum verwendet werden.) + + \paragraph{Kreiseigenschaft} + Für jeden Kreis $K$ im Graphen gilt: + Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums. +\end{algorithm} -% \paragraph{Kreiseigenschaft} -% Für jeden Kreis $K$ im Graphen gilt: -% Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums. +\begin{algorithm}{Kruskal} + \begin{methods}[ll] + berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ + \end{methods} + \sourcecode{graph/kruskal.cpp} +\end{algorithm} + +\begin{algorithm}{Erd\H{o}s-Gallai} + Sei $d_1 \geq \cdots \geq d_{n}$. Es existiert genau dann ein Graph $G$ mit Degreesequence $d$ falls $\sum\limits_{i=1}^{n} d_i$ gerade ist und für $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$ + \begin{methods} + \method{havelHakimi}{findet Graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})} + \end{methods} + \sourcecode{graph/havelHakimi.cpp} +\end{algorithm} + +\begin{algorithm}{Centroids} + \begin{methods} + \method{find\_centroid}{findet alle Centroids des Baums (maximal 2)}{\abs{V}} + \end{methods} + \sourcecode{graph/centroid.cpp} +\end{algorithm} + +\begin{algorithm}{Baum-Isomorphie} + \begin{methods} + \method{getTreeLabel}{berechnet kanonischen Namen für einen Baum}{\abs{V}} + \end{methods} + \sourcecode{graph/treeIsomorphism.cpp} +\end{algorithm} \subsection{Kürzeste Wege} -% \subsubsection{Algorithmus von \textsc{Dijkstra}} -% Kürzeste Pfade in Graphen ohne negative Kanten. -\lstinputlisting{graph/dijkstra.cpp} - -% \subsubsection{\textsc{Bellmann-Ford}-Algorithmus} -% Kürzestes Pfade in Graphen mit negativen Kanten. -% Erkennt negative Zyklen. -\lstinputlisting{graph/bellmannFord.cpp} - -% \subsubsection{\textsc{Floyd-Warshall}-Algorithmus} -% \lstinputlisting{graph/floydWarshall.cpp} -Floyd Warshall: -\begin{itemize}[nosep] - \item Nur negative Werte sollten die Nullen bei Schlingen überschreiben. - \item Von parallelen Kanten sollte nur die günstigste gespeichert werden. - \item \lstinline{i} liegt genau dann auf einem negativen Kreis, wenn \lstinline{dist[i][i] < 0} ist. - \item Wenn für \lstinline{c} gilt, dass \lstinline{dist[u][c] != INF && dist[c][v] != INF && dist[c][c] < 0}, wird der u-v-Pfad beliebig kurz. -\end{itemize} +\subsubsection{Algorithmus von \textsc{Dijkstra}} +\method{dijkstra}{kürzeste Pfade in Graphen ohne negative Kanten}{\abs{E}\*\log(\abs{V})} +\sourcecode{graph/dijkstra.cpp} + +\subsubsection{\textsc{Bellmann-Ford}-Algorithmus} +\method{bellmanFord}{kürzeste Pfade oder negative Kreise finden}{\abs{V}\*\abs{E}} +\sourcecode{graph/bellmannFord.cpp} -\subsection{Strongly Connected Components (\textsc{Tarjans}-Algorithmus)} -\lstinputlisting{graph/scc.cpp} - -\subsection{Artikulationspunkte und Brücken} -\lstinputlisting{graph/articulationPoints.cpp} - -\subsection{Eulertouren} -\begin{itemize}[nosep] - \item Zyklus existiert, wenn jeder Knoten geraden Grad hat (ungerichtet), bzw. bei jedem Knoten Ein- und Ausgangsgrad übereinstimmen (gerichtet). - \item Pfad existiert, wenn alle bis auf (maximal) zwei Knoten geraden Grad haben (ungerichtet), bzw. 
bei allen Knoten bis auf zwei Ein- und Ausgangsgrad übereinstimmen, wobei einer eine Ausgangskante mehr hat (Startknoten) und einer eine Eingangskante mehr hat (Endknoten). - \item \textbf{Je nach Aufgabenstellung überprüfen, wie isolierte Punkte interpretiert werden sollen.} - \item Der Code unten läuft in Linearzeit. - Wenn das nicht notwenidg ist (oder bestimmte Sortierungen verlangt werden), gehts mit einem \lstinline{set} einfacher. - \item Algorithmus schlägt nicht fehl, falls kein Eulerzyklus existiert. - Die Existenz muss separat geprüft werden. +\subsubsection{\textsc{Floyd-Warshall}-Algorithmus} +\method{floydWarshall}{kürzeste Pfade oder negative Kreise finden}{\abs{V}^3} +\begin{itemize} + \item \code{dist[i][i] = 0, dist[i][j] = edge\{j, j\}.weight} oder \code{INF} + \item \code{i} liegt auf einem negativen Kreis $\Leftrightarrow$ \code{dist[i][i] < 0}. \end{itemize} -\begin{lstlisting} -VISIT(v): - forall e=(v,w) in E - delete e from E - VISIT(w) - print e -\end{lstlisting} -\lstinputlisting{graph/euler.cpp} - -\subsection{Lowest Common Ancestor} -\lstinputlisting{graph/lca.cpp} +\sourcecode{graph/floydWarshall.cpp} -\subsection{Max-Flow} +\subsubsection{Matrix-Algorithmus} +Sei $d_{ij}$ die Distanzmatrix von $G$, dann gibt $d_{ij}^k$ die kürzeste Distanz von $i$ nach $j$ mit maximal $k$ Kanten an mit der Verknüpfung: $c_{ij} = a_{ij} * b_{ij} = \min\{a_{ik} + b_{kj}\}$ + + +Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, dann gibt $a_{ij}^k$ die Anzahl der Wege von $i$ nach $j$ mit Länge genau \textcolor{gray}{(maximal)} $k$ an mit der Verknüpfung: $c_{ij} = a_{ij} \* b_{ij} = \sum a_{ik} \cdot b_{kj}$ + +\begin{algorithm}{Artikulationspunkte, Brücken und BCC} + \begin{methods} + \method{findArticulationPoints}{berechnet Artikulationspunkte,}{\abs{V}+\abs{E}} + \method{}{Brücken und BCC}{} + \end{methods} + \textbf{Wichtig:} isolierte Knoten und Brücken sind keine BCC. + \sourcecode{graph/articulationPoints.cpp} +\end{algorithm} +\begin{algorithm}{Strongly Connected Components (\textsc{Tarjan})} + \begin{methods} + \method{scc}{berechnet starke Zusammenhangskomponenten}{\abs{V}+\abs{E}} + \end{methods} + \sourcecode{graph/scc.cpp} +\end{algorithm} + +\begin{algorithm}{2-SAT} + \sourcecode{graph/2sat.cpp} +\end{algorithm} + +\begin{algorithm}{Eulertouren} + \begin{methods} + \method{euler}{berechnet den Kreis}{\abs{V}+\abs{E}} + \end{methods} + \sourcecode{graph/euler.cpp} + \begin{itemize} + \item Zyklus existiert, wenn jeder Knoten geraden Grad hat (ungerichtet),\\ bei jedem Knoten Ein- und Ausgangsgrad übereinstimmen (gerichtet). + \item Pfad existiert, wenn genau $\{0, 2\}$ Knoten ungeraden Grad haben (ungerichtet),\\ bei allen Knoten Ein- und Ausgangsgrad übereinstimmen oder einer eine Ausgangskante mehr hat (Startknoten) und einer eine Eingangskante mehr hat (Endknoten). + \item \textbf{Je nach Aufgabenstellung überprüfen, wie ein unzusammenhängender Graph interpretiert werden soll.} + \item Wenn eine bestimmte Sortierung verlangt wird oder Laufzeit vernachlässigbar ist, ist eine Implementierung mit einem \code{vector<set<int>> adjlist} leichter. + \item \textbf{Wichtig:} Algorithmus schlägt nicht fehl, falls kein Eulerzyklus existiert. + Die Existenz muss separat geprüft werden.
+ \end{itemize} +\end{algorithm} + +\begin{algorithm}{Cycle Counting} + \begin{methods} + \method{findBase}{berechnet Basis}{\abs{V}\cdot\abs{E}} + \method{count}{zählt Zykel}{2^{\abs{\mathit{base}}}} + \end{methods} + \begin{itemize} + \item jeder Zyklus ist das xor von einträgen in \code{base}. + \end{itemize} + \sourcecode{graph/cycleCounting.cpp} +\end{algorithm} + +\begin{algorithm}{Lowest Common Ancestor} + \begin{methods} + \method{init}{baut DFS-Baum über $g$ auf}{\abs{V}\*\log(\abs{V})} + \method{getLCA}{findet LCA}{1} + \method{getDepth}{berechnet Distanz zur Wurzel im DFS-Baum}{1} + \end{methods} + \sourcecode{graph/LCA_sparse.cpp} +\end{algorithm} + +\begin{algorithm}{Heavy-Light Decomposition} + \begin{methods} + \method{get\_intervals}{gibt Zerlegung des Pfades von $u$ nach $v$}{\log(\abs{V})} + \end{methods} + \textbf{Wichtig:} Intervalle sind halboffen + + Subbaum unter dem Knoten $v$ ist das Intervall $[\text{\code{in[v]}},~\text{\code{out[v]}})$. + \sourcecode{graph/hld.cpp} +\end{algorithm} + +\begin{algorithm}{Dynamic Connectivity} + \begin{methods} + \method{Constructor}{erzeugt Baum ($n$ Knoten, $m$ updates)}{n+m} + \method{addEdge}{fügt Kannte ein,\texttt{id}=delete Zeitpunkt}{\log(n)} + \method{eraseEdge}{entfernt Kante \texttt{id}}{\log(n)} + \end{methods} + \sourcecode{graph/connect.cpp} +\end{algorithm} + +\begin{algorithm}{Global Mincut} + \begin{methods} + \method{stoer\_wagner}{berechnet globalen Mincut}{\abs{V}^2\*\log(\abs{E})} + \method{merge(a,b)}{merged Knoten $b$ in Knoten $a$}{\abs{E}} + \end{methods} + \textbf{Tipp:} Cut Rekonstruktion mit \code{unionFind} für Partitionierung oder \code{vector<bool>} für edge id's im cut. + \sourcecode{graph/stoerWagner.cpp} +\end{algorithm} + +\subsection{Max-Flow} +\optional{ \subsubsection{Capacity Scaling} -Gut bei dünn besetzten Graphen. -\lstinputlisting{graph/capacityScaling.cpp} +\begin{methods} + \method{maxFlow}{gut bei dünn besetzten Graphen.}{\abs{E}^2\*\log(C)} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/capacityScaling.cpp} +} -% \subsubsection{Push Relabel} -% Gut bei sehr dicht besetzten Graphen. -% \lstinputlisting{graph/pushRelabel.cpp} +\subsubsection{Push Relabel} +\begin{methods} + \method{maxFlow}{gut bei sehr dicht besetzten Graphen.}{\abs{V}^2\*\sqrt{\abs{E}}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/pushRelabel3.cpp} \subsubsection{Dinic's Algorithm mit Capacity Scaling} -Nochmal ca. Faktor 2 schneller als Ford Fulkerson mit Capacity Scaling. -\lstinputlisting{graph/dinicScaling.cpp} +\begin{methods} + \method{maxFlow}{doppelt so schnell wie Ford Fulkerson}{\abs{V}^2\cdot\abs{E}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/dinicScaling.cpp} +\optional{ \subsubsection{Anwendungen} -\begin{itemize}[nosep] +\begin{itemize} \item \textbf{Maximum Edge Disjoint Paths}\newline Finde die maximale Anzahl Pfade von $s$ nach $t$, die keine Kante teilen. - \begin{enumerate}[nosep] + \begin{enumerate} \item Setze $s$ als Quelle, $t$ als Senke und die Kapazität jeder Kante auf 1. \item Der maximale Fluss entspricht den unterschiedlichen Pfaden ohne gemeinsame Kanten. \end{enumerate} \item \textbf{Maximum Independent Paths}\newline Finde die maximale Anzahl an Pfaden von $s$ nach $t$, die keinen Knoten teilen. 
- \begin{enumerate}[nosep] + \begin{enumerate} + \item Setze $s$ als Quelle, $t$ als Senke und die Kapazität jeder Kante \emph{und jedes Knotens} auf 1. + \item Der maximale Fluss entspricht den unterschiedlichen Pfaden ohne gemeinsame Knoten. + \end{enumerate} @@ -93,26 +195,69 @@ Nochmal ca. Faktor 2 schneller als Ford Fulkerson mit Capacity Scaling. Bei Quelle $s$ und Senke $t$, partitioniere in $S$ und $T$. Zu $S$ gehören alle Knoten, die im Residualgraphen von $s$ aus erreichbar sind (Rückwärtskanten beachten). \end{itemize} +} -\subsection{Min-Cost-Max-Flow} -\lstinputlisting{graph/minCostMaxFlow.cpp} +\begin{algorithm}{Min-Cost-Max-Flow} + \begin{methods} + \method{mincostflow}{berechnet Fluss}{\abs{V}^2\cdot\abs{E}^2} + \end{methods} + \sourcecode{graph/minCostMaxFlow.cpp} +\end{algorithm} -\subsection{Maximal Cardinatlity Bipartite Matching}\label{kuhn} -\lstinputlisting{graph/maxCarBiMatch.cpp} -\lstinputlisting{graph/hopcroftKarp.cpp} +\begin{algorithm}{Maximal Cardinality Bipartite Matching} + \label{kuhn} + \begin{methods} + \method{kuhn}{berechnet Matching}{\abs{V}\*\min(ans^2, \abs{E})} + \end{methods} + \begin{itemize} + \item die ersten [0..n) Knoten in \code{adjlist} sind die linke Seite des Graphen + \end{itemize} + \sourcecode{graph/maxCarBiMatch.cpp} + \begin{methods} + \method{hopcroft\_karp}{berechnet Matching}{\sqrt{\abs{V}}\*\abs{E}} + \end{methods} + \sourcecode{graph/hopcroftKarp.cpp} +\end{algorithm} -\subsection{Maximum Weight Bipartite Matching} -\lstinputlisting{graph/maxWeightBipartiteMatching.cpp} +\begin{algorithm}{Maximum Weight Bipartite Matching} + \begin{methods} + \method{match}{berechnet Matching}{\abs{V}^3} + \end{methods} + \sourcecode{graph/maxWeightBipartiteMatching.cpp} +\end{algorithm} -\subsection{Wert des maximalen Matchings} -\lstinputlisting{graph/matching.cpp} +\begin{algorithm}{Wert des maximalen Matchings} + Fehlerwahrscheinlichkeit: $\left(\frac{m}{MOD}\right)^I$ + \sourcecode{graph/matching.cpp} +\end{algorithm} -\subsection{2-SAT} -\lstinputlisting{graph/2sat.cpp} +\begin{algorithm}{Allgemeines maximales Matching} + \begin{methods} + \method{match}{berechnet allgemeines Matching}{\abs{E}\*\abs{V}\*\log(\abs{V})} + \end{methods} + \sourcecode{graph/blossom.cpp} +\end{algorithm} -% \subsection{TSP} -% \lstinputlisting{graph/TSP.cpp} +\optional{ +\begin{algorithm}{TSP} + \begin{methods} + \method{TSP}{berechnet eine Tour}{n^2\*2^n} + \end{methods} + \sourcecode{graph/TSP.cpp} +\end{algorithm} -\subsection{Bitonic TSP} -\lstinputlisting{graph/bitonicTSP.cpp} +\begin{algorithm}{Bitonic TSP} + \begin{methods} + \method{bitonicTSP}{berechnet eine bitonische Tour}{n^2} + \end{methods} + \sourcecode{graph/bitonicTSPsimple.cpp} +\end{algorithm} +} +\begin{algorithm}{Maximal Cliques} + \begin{methods} + \method{bronKerbosch}{berechnet alle maximalen Cliquen}{3^\frac{n}{3}} + \method{addEdge}{fügt \textbf{ungerichtete} Kante ein}{1} + \end{methods} + \sourcecode{graph/bronKerbosch.cpp} +\end{algorithm} diff --git a/graph/havelHakimi.cpp b/graph/havelHakimi.cpp new file mode 100644 index 0000000..9fb9846 --- /dev/null +++ b/graph/havelHakimi.cpp @@ -0,0 +1,19 @@ +vector<vector<int>> havelHakimi(const vector<int>& deg) { + priority_queue<pair<int, int>> pq; + for (int i = 0; i < sz(deg); i++) pq.push({deg[i], i}); + vector<vector<int>> adj(sz(deg)); + while (!pq.empty()) { + auto v = pq.top(); pq.pop(); + if (sz(pq) < v.first) return {}; //ERROR + vector<pair<int, int>> todo; + for (int i = 0; i < v.first; i++) { + auto u = pq.top(); pq.pop(); +
adj[v.second].push_back(u.second); + adj[u.second].push_back(v.second); + u.first--; + if (u.first > 0) todo.push_back(u); + } + for (auto e : todo) pq.push(e); + } + return adj; +} diff --git a/graph/hld.cpp b/graph/hld.cpp new file mode 100644 index 0000000..3d63903 --- /dev/null +++ b/graph/hld.cpp @@ -0,0 +1,52 @@ +vector<vector<int>> adj; +vector<int> sz, in, out, nxt, par; +int t; + +void dfs_sz(int v = 0, int from = -1) { + sz[v] = 1; + for (auto& u : adj[v]) { + if (u != from) { + dfs_sz(u, v); + sz[v] += sz[u]; + } + if (adj[v][0] == from || sz[u] > sz[adj[v][0]]) { + swap(u, adj[v][0]); +}}} + +void dfs_hld(int v = 0, int from = -1) { + par[v] = from; + in[v] = t++; + for (int u : adj[v]) { + if (u == from) continue; + nxt[u] = (u == adj[v][0] ? nxt[v] : u); + dfs_hld(u, v); + } + out[v] = t; +} + +void init() { + int n = sz(adj); + sz.assign(n, 0); in.assign(n, 0); out.assign(n, 0); + nxt.assign(n, 0); par.assign(n, -1); + t = 0; + dfs_sz(); dfs_hld(); +} + +vector<pair<int, int>> get_intervals(int u, int v) { + vector<pair<int, int>> res; + while (true) { + if (in[v] < in[u]) swap(u, v); + if (in[nxt[v]] <= in[u]) { + res.eb(in[u], in[v] + 1); + return res; + } + res.eb(in[nxt[v]], in[v] + 1); + v = par[nxt[v]]; +}} + +int get_lca(int u, int v) { + while (true) { + if (in[v] < in[u]) swap(u, v); + if (in[nxt[v]] <= in[u]) return in[u]; + v = par[nxt[v]]; +}} diff --git a/graph/hopcroftKarp.cpp b/graph/hopcroftKarp.cpp index 629ebf7..132b249 100644 --- a/graph/hopcroftKarp.cpp +++ b/graph/hopcroftKarp.cpp @@ -1,46 +1,43 @@ -// Laufzeit: O(sqrt(|V|)*|E|) -// Kanten von links nach rechts. -// 0: dummy Knoten, 1..n: linke Knoten, n+1..n+m: rechte Knoten vector<vector<int>> adjlist; -vector<int> match, dist; +// pairs ist der gematchte Knoten oder -1 +vector<int> pairs, dist; bool bfs(int n) { - queue<int> q; - dist[0] = INF; - for(int i = 1; i <= n; i++) { - if(match[i] == 0) { dist[i] = 0; q.push(i); } - else dist[i] = INF; - } - while(!q.empty()) { - int u = q.front(); q.pop(); - if(dist[u] < dist[0]) for (int v : adjlist[u]) - if(dist[match[v]] == INF) { - dist[match[v]] = dist[u] + 1; - q.push(match[v]); - } - } - return dist[0] != INF; + queue<int> q; + for(int i = 0; i < n; i++) { + if (pairs[i] < 0) {dist[i] = 0; q.push(i);} + else dist[i] = -1; + } + while(!q.empty()) { + int u = q.front(); q.pop(); + for (int v : adjlist[u]) { + if (pairs[v] < 0) return true; + if (dist[pairs[v]] < 0) { + dist[pairs[v]] = dist[u] + 1; + q.push(pairs[v]); + }}} + return false; } bool dfs(int u) { - if(u != 0) { - for (int v : adjlist[u]) - if(dist[match[v]] == dist[u] + 1) - if(dfs(match[v])) { match[v] = u; match[u] = v; return true; } - dist[u] = INF; - return false; - } - return true; + for (int v : adjlist[u]) { + if (pairs[v] < 0 || + (dist[pairs[v]] > dist[u] && dfs(pairs[v]))) { + pairs[v] = u; pairs[u] = v; + return true; + }} + dist[u] = -1; + return false; } int hopcroft_karp(int n) { // n = #Knoten links - int ans = 0; - match.assign(adjlist.size(), 0); - dist.resize(adjlist.size()); - // Greedy Matching, optionale Beschleunigung. - for (int i = 1; i <= n; i++) for (int w : adjlist[i]) - if (match[w] == 0) { match[i] = w; match[w] = i; ans++; break; } - while(bfs(n)) for(int i = 1; i <= n; i++) - if(match[i] == 0 && dfs(i)) ans++; - return ans; -} + int ans = 0; + pairs.assign(sz(adjlist), -1); + dist.resize(n); + // Greedy Matching, optionale Beschleunigung. 
+ for (int i = 0; i < n; i++) for (int w : adjlist[i]) + if (pairs[w] < 0) {pairs[i] = w; pairs[w] = i; ans++; break;} + while(bfs(n)) for(int i = 0; i < n; i++) + if (pairs[i] < 0) ans += dfs(i); + return ans; +}
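// Usage sketch (assumption, mirroring the kuhn convention noted in
// graph.tex): the first n entries of adjlist are the left side, entries
// n..n+m-1 the right side, and edges are stored only from left to right.
// The helper name exampleHopcroftKarp is illustrative only.
int exampleHopcroftKarp() {
    int n = 2, m = 2;
    adjlist.assign(n + m, {});
    adjlist[0] = {2, 3}; // links 0 -- rechts 0, rechts 1
    adjlist[1] = {2};    // links 1 -- rechts 0
    return hopcroft_karp(n); // 2; pairs[v] ist der Partner von v, oder -1
}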
\ No newline at end of file diff --git a/graph/kruskal.cpp b/graph/kruskal.cpp new file mode 100644 index 0000000..af5a8ff --- /dev/null +++ b/graph/kruskal.cpp @@ -0,0 +1,9 @@ +sort(edges.begin(), edges.end()); +vector<edge> mst; +int cost = 0; +for (edge& e : edges) { + if (findSet(e.from) != findSet(e.to)) { + unionSets(e.from, e.to); + mst.push_back(e); + cost += e.cost; +}} diff --git a/graph/lca.cpp b/graph/lca.cpp deleted file mode 100644 index d6548e9..0000000 --- a/graph/lca.cpp +++ /dev/null @@ -1,28 +0,0 @@ -struct LCA { - vector<int> depth, visited, first; - int idx; - SparseTable st; - - void init(vector<vector<int>> &g, int root) { // Laufzeit: O(|V|) - depth.assign(2 * g.size(), 0); - visited.assign(2 * g.size(), -1); - first.assign(g.size(), 2 * g.size()); - idx = 0; - visit(g, root, 0); - st.init(&depth); - } - - void visit(vector<vector<int>> &g, int v, int d) { - visited[idx] = v, depth[idx] = d, first[v] = min(idx, first[v]), idx++; - - for (int w : g[v]) { - if (first[w] == 2 * (int)g.size()) { - visit(g, w, d + 1); - visited[idx] = v, depth[idx] = d, idx++; - }}} - - int getLCA(int a, int b) { // Laufzeit: O(1) - if (first[a] > first[b]) swap(a, b); - return visited[st.queryIdempotent(first[a], first[b])]; - } -}; diff --git a/graph/matching.cpp b/graph/matching.cpp index 4383330..ed9ba62 100644 --- a/graph/matching.cpp +++ b/graph/matching.cpp @@ -1,35 +1,41 @@ -// Fehlerwahrscheinlichkeit: (n / MOD)^I -const int N=200, MOD=1000000007, I=10; -int n, adj[N][N], a[N][N]; +constexpr int MOD=1000000007, I=10; +vector<vector<ll>> adjlist, mat; -int rank() { - int r = 0; - for (int j = 0; j < n; j++) { - int k = r; - while (k < n && !a[k][j]) ++k; - if (k == n) continue; - swap(a[r], a[k]); - int inv = powmod(a[r][j], MOD - 2); - for (int i = j; i < n; i++) - a[r][i] = 1LL * a[r][i] * inv % MOD; - for (int u = r + 1; u < n; u++) - for (int v = j; v < n; v++) - a[u][v] = (a[u][v] - 1LL * a[r][v] * a[u][j] % MOD + MOD) % MOD; - ++r; - } - return r; +int gauss(int n, ll p) { + int rank = n; + for (int line = 0; line < n; line++) { + int swappee = line; + while (swappee < n && mat[swappee][line] == 0) swappee++; + if (swappee == n) {rank--; continue;} + swap(mat[line], mat[swappee]); + ll factor = powMod(mat[line][line], p - 2, p); + for (int i = 0; i < n; i++) { + mat[line][i] *= factor; + mat[line][i] %= p; + } + for (int i = 0; i < n; i++) { + if (i == line) continue; + ll diff = mat[i][line]; + for (int j = 0; j < n; j++) { + mat[i][j] -= (diff * mat[line][j]) % p; + mat[i][j] %= p; + if (mat[i][j] < 0) mat[i][j] += p; + }}} + return rank; } int max_matching() { - int ans = 0; - for (int _ = 0; _ < I; _++) { - for (int i = 0; i < n; i++) { - for (int j = 0; j < i; j++) { - if (adj[i][j]) { - a[i][j] = rand() % (MOD - 1) + 1; - a[j][i] = MOD - a[i][j]; - }}} - ans = max(ans, rank()/2); - } - return ans; + int ans = 0; + mat.assign(sz(adjlist), vector<ll>(sz(adjlist))); + for (int _ = 0; _ < I; _++) { + for (int i = 0; i < sz(adjlist); i++) { + mat[i].assign(sz(adjlist), 0); + for (int j : adjlist[i]) { + if (j < i) { + mat[i][j] = rand() % (MOD - 1) + 1; + mat[j][i] = MOD - mat[i][j]; + }}} + ans = max(ans, gauss(sz(adjlist), MOD)/2); + } + return ans; } diff --git a/graph/maxCarBiMatch.cpp b/graph/maxCarBiMatch.cpp index 24aebef..0a38d84 100644 --- a/graph/maxCarBiMatch.cpp +++ b/graph/maxCarBiMatch.cpp @@ -1,5 +1,3 @@ -// Laufzeit: O(n*min(ans^2, |E|)) -// Kanten von links nach rechts. Die ersten n Knoten sind links, die anderen rechts. 
vector<vector<int>> adjlist; vector<int> pairs; // Der gematchte Knoten oder -1. vector<bool> visited; @@ -14,12 +12,12 @@ bool dfs(int v) { } int kuhn(int n) { // n = #Knoten links. - pairs.assign(adjlist.size(), -1); + pairs.assign(sz(adjlist), -1); int ans = 0; // Greedy Matching. Optionale Beschleunigung. for (int i = 0; i < n; i++) for (auto w : adjlist[i]) - if (pairs[w] == -1) { pairs[i] = w; pairs[w] = i; ans++; break; } - for (int i = 0; i < n; i++) if (pairs[i] == -1) { + if (pairs[w] < 0) {pairs[i] = w; pairs[w] = i; ans++; break;} + for (int i = 0; i < n; i++) if (pairs[i] < 0) { visited.assign(n, false); ans += dfs(i); } diff --git a/graph/maxWeightBipartiteMatching.cpp b/graph/maxWeightBipartiteMatching.cpp index f734fa4..ef99232 100644 --- a/graph/maxWeightBipartiteMatching.cpp +++ b/graph/maxWeightBipartiteMatching.cpp @@ -1,54 +1,59 @@ -// Laufzeit: O(|V|^3) -int costs[N_LEFT][N_RIGHT]; +double costs[N_LEFT][N_RIGHT]; -// Es muss l<=r sein, ansonsten terminiert der Algorithmus nicht. -int match(int l, int r) { - vector<int> xy(l, -1), yx(r, -1), lx(l), ly(r, 0), augmenting(r); - vector<bool> s(l); - vector<ii> slack(r, ii(0,0)); +// Es muss l<=r sein! (sonst Endlosschleife) +double match(int l, int r) { + vector<double> lx(l), ly(r); + //xy is matching from l->r, yx from r->l, or -1 + vector<int> xy(l, -1), yx(r, -1), augmenting(r); + vector<bool> s(l); + vector<pair<double, int>> slack(r); - for (int x = 0; x < l; x++) lx[x] = *max_element(costs[x], costs[x] + r); - for (int root = 0; root < l; root++) { - fill(augmenting.begin(), augmenting.end(), -1); - fill(s.begin(), s.end(), false); - s[root] = true; - for (int y = 0; y < r; y++) { - slack[y] = ii(lx[root] + ly[y] - costs[root][y], root); - } - int y = -1; - for (;;) { - int delta = INT_MAX, x = -1; - for (int yy = 0; yy < r; yy++) { - if (augmenting[yy] == -1) { - if (slack[yy].first < delta) { - delta = slack[yy].first; - x = slack[yy].second; - y = yy; - }}} - if (delta > 0) { - for (int x = 0; x < l; x++) if (s[x]) lx[x] -= delta; - for (int y = 0; y < r; y++) { - if (augmenting[y] > -1) ly[y] += delta; - else slack[y].first -= delta; - }} - augmenting[y] = x; - x = yx[y]; - if (x == -1) break; - s[x] = true; - for (int y = 0; y < r; y++) { - if (augmenting[y] == -1) { - ii alt = ii(lx[x] + ly[y] - costs[x][y], x); - if (slack[y].first > alt.first) { - slack[y] = alt; - }}}} - while (y != -1) { - // Jede Iteration vergrößert Matching um 1 (können 0-Kanten sein!). - int x = augmenting[y]; - int prec = xy[x]; - yx[y] = x; - xy[x] = y; - y = prec; - }} - return accumulate(lx.begin(), lx.end(), 0) + - accumulate(ly.begin(), ly.end(), 0); // Wert des Matchings. 
+ for (int x = 0; x < l; x++) + lx[x] = *max_element(costs[x], costs[x] + r); + for (int root = 0; root < l; root++) { + augmenting.assign(r, -1); + s.assign(l, false); + s[root] = true; + for (int y = 0; y < r; y++) { + slack[y] = {lx[root] + ly[y] - costs[root][y], root}; + } + int y = -1; + while (true) { + double delta = INF; + int x = -1; + for (int yy = 0; yy < r; yy++) { + if (augmenting[yy] < 0) { + if (slack[yy].first < delta) { + delta = slack[yy].first; + x = slack[yy].second; + y = yy; + }}} + if (delta > 0) { + for (int x = 0; x < l; x++) if (s[x]) lx[x] -= delta; + for (int y = 0; y < r; y++) { + if (augmenting[y] >= 0) ly[y] += delta; + else slack[y].first -= delta; + }} + augmenting[y] = x; + x = yx[y]; + if (x == -1) break; + s[x] = true; + for (int y = 0; y < r; y++) { + if (augmenting[y] < 0) { + double alt = lx[x] + ly[y] - costs[x][y]; + if (slack[y].first > alt) { + slack[y] = {alt, x}; + }}}} + while (y >= 0) { + // Jede Iteration vergrößert Matching um 1 + // (können 0-Kanten sein!) + int x = augmenting[y]; + int prec = xy[x]; + yx[y] = x; + xy[x] = y; + y = prec; + }} + // Wert des Matchings + return accumulate(lx.begin(), lx.end(), 0.0) + + accumulate(ly.begin(), ly.end(), 0.0); } diff --git a/graph/minCostMaxFlow.cpp b/graph/minCostMaxFlow.cpp index 8d3ca4c..ee8aa10 100644 --- a/graph/minCostMaxFlow.cpp +++ b/graph/minCostMaxFlow.cpp @@ -1,67 +1,64 @@ -static const ll flowlimit = 1LL << 60; // Größer als der maximale Fluss. -struct MinCostFlow { // Mit new erstellen! - static const int maxn = 400; // Größer als die Anzahl der Knoten. - static const int maxm = 5000; // Größer als die Anzahhl der Kanten. - struct edge { int node, next; ll flow, value; } edges[maxm << 1]; - int graph[maxn], queue[maxn], pre[maxn], con[maxn]; - int n, m, source, target, top; - bool inqueue[maxn]; - ll maxflow, mincost, dis[maxn]; +constexpr ll INF = 1LL << 60; // Größer als der maximale Fluss. +struct MinCostFlow { + struct edge { + int to; + ll f, cost; + }; + vector<edge> edges; + vector<vector<int>> adjlist; + vector<int> pref, con; + vector<ll> dist; - MinCostFlow() { memset(graph, -1, sizeof(graph)); top = 0; } + const int s, t; + ll maxflow, mincost; - inline int inverse(int x) { return 1 + ((x >> 1) << 2) - x; } + MinCostFlow(int n, int source, int target) : + adjlist(n), s(source), t(target) {}; - // Directed edge from u to v, capacity c, weight w. 
- inline int addedge(int u, int v, int c, int w) { - edges[top].value = w; edges[top].flow = c; edges[top].node = v; - edges[top].next = graph[u]; graph[u] = top++; - edges[top].value = -w; edges[top].flow = 0; edges[top].node = u; - edges[top].next = graph[v]; graph[v] = top++; - return top - 2; + void addedge(int u, int v, ll c, ll cost) { + adjlist[u].push_back(sz(edges)); + edges.push_back({v, c, cost}); + adjlist[v].push_back(sz(edges)); + edges.push_back({u, 0, -cost}); } bool SPFA() { - int point, node, now, head = 0, tail = 1; - memset(pre, -1, sizeof(pre)); - memset(inqueue, 0, sizeof(inqueue)); - memset(dis, 0x7F, sizeof(dis)); - dis[source] = 0; queue[0] = source; - pre[source] = source; inqueue[source] = true; + pref.assign(sz(adjlist), -1); + dist.assign(sz(adjlist), INF); + vector<bool> inqueue(sz(adjlist)); + queue<int> queue; - while (head != tail) { - now = queue[head++]; - point = graph[now]; - inqueue[now] = false; - head %= maxn; + dist[s] = 0; queue.push(s); + pref[s] = s; inqueue[s] = true; - while (point != -1) { - node = edges[point].node; - if (edges[point].flow > 0 && - dis[node] > dis[now] + edges[point].value) { - dis[node] = dis[now] + edges[point].value; - pre[node] = now; con[node] = point; - if (!inqueue[node]) { - inqueue[node] = true; queue[tail++] = node; - tail %= maxn; - }} - point = edges[point].next; - }} - return pre[target] != -1; + while (!queue.empty()) { + int cur = queue.front(); queue.pop(); + inqueue[cur] = false; + for (int id : adjlist[cur]) { + int to = edges[id].to; + if (edges[id].f > 0 && + dist[to] > dist[cur] + edges[id].cost) { + dist[to] = dist[cur] + edges[id].cost; + pref[to] = cur; con[to] = id; + if (!inqueue[to]) { + inqueue[to] = true; queue.push(to); + }}}} + return pref[t] != -1; } void extend() { - ll w = flowlimit; - for (int u = target; pre[u] != u; u = pre[u]) - w = min(w, edges[con[u]].flow); + ll w = INF; + for (int u = t; pref[u] != u; u = pref[u]) + w = min(w, edges[con[u]].f); maxflow += w; - mincost += dis[target] * w; - for (int u = target; pre[u] != u; u = pre[u]) { - edges[con[u]].flow -= w; - edges[inverse(con[u])].flow += w; + mincost += dist[t] * w; + for (int u = t; pref[u] != u; u = pref[u]) { + edges[con[u]].f -= w; + edges[con[u] ^ 1].f += w; }} void mincostflow() { + con.assign(sz(adjlist), 0); maxflow = mincost = 0; while (SPFA()) extend(); } diff --git a/graph/pushRelabel.cpp b/graph/pushRelabel.cpp index 7bb1145..182fa12 100644 --- a/graph/pushRelabel.cpp +++ b/graph/pushRelabel.cpp @@ -1,28 +1,29 @@ -// Laufzeit: O(|V|^3) struct PushRelabel { - ll capacities[MAX_V][MAX_V], flow[MAX_V][MAX_V], excess[MAX_V]; - int height[MAX_V], list[MAX_V - 2], seen[MAX_V], n; + vector<vector<long long>> capacities, flow; + vector<long long> excess; + vector<int> height, seen, list; + int n; PushRelabel(int n) { this->n = n; - memset(capacities, 0L, sizeof(capacities)); - memset(flow, 0L, sizeof(flow)); - memset(excess, 0L, sizeof(excess)); - memset(height, 0, sizeof(height)); - memset(list, 0, sizeof(list)); - memset(seen, 0, sizeof(seen)); + capacities.assign(n, vector<long long>(n)); + flow.assign(n, vector<long long>(n)); + excess.assign(n, 0); + height.assign(n, 0); + seen.assign(n, 0); + list.assign(n - 2, 0); } - inline void addEdge(int u, int v, ll c) { capacities[u][v] += c; } + inline void addEdge(int u, int v, long long c) {capacities[u][v] += c;} void push(int u, int v) { - ll send = min(excess[u], capacities[u][v] - flow[u][v]); + long long send = min(excess[u], capacities[u][v] - flow[u][v]); flow[u][v]
diff --git a/graph/pushRelabel.cpp b/graph/pushRelabel.cpp
index 7bb1145..182fa12 100644
--- a/graph/pushRelabel.cpp
+++ b/graph/pushRelabel.cpp
@@ -1,28 +1,29 @@
-// Runtime: O(|V|^3)
 struct PushRelabel {
-  ll capacities[MAX_V][MAX_V], flow[MAX_V][MAX_V], excess[MAX_V];
-  int height[MAX_V], list[MAX_V - 2], seen[MAX_V], n;
+  vector<vector<long long>> capacities, flow;
+  vector<long long> excess;
+  vector<int> height, seen, list;
+  int n;
 
   PushRelabel(int n) {
     this->n = n;
-    memset(capacities, 0L, sizeof(capacities));
-    memset(flow, 0L, sizeof(flow));
-    memset(excess, 0L, sizeof(excess));
-    memset(height, 0, sizeof(height));
-    memset(list, 0, sizeof(list));
-    memset(seen, 0, sizeof(seen));
+    capacities.assign(n, vector<long long>(n));
+    flow.assign(n, vector<long long>(n));
+    excess.assign(n, 0);
+    height.assign(n, 0);
+    seen.assign(n, 0);
+    list.assign(n - 2, 0);
   }
 
-  inline void addEdge(int u, int v, ll c) { capacities[u][v] += c; }
+  inline void addEdge(int u, int v, long long c) {capacities[u][v] += c;}
 
   void push(int u, int v) {
-    ll send = min(excess[u], capacities[u][v] - flow[u][v]);
+    long long send = min(excess[u], capacities[u][v] - flow[u][v]);
     flow[u][v] += send; flow[v][u] -= send;
     excess[u] -= send; excess[v] += send;
   }
 
   void relabel(int u) {
-    int minHeight = INT_MAX / 2;
+    int minHeight = INT_INF;
     for (int v = 0; v < n; v++) {
       if (capacities[u][v] - flow[u][v] > 0) {
         minHeight = min(minHeight, height[v]);
@@ -47,12 +48,12 @@ struct PushRelabel {
     list[0] = temp;
   }
 
-  ll maxFlow(int source, int target) {
+  long long maxFlow(int source, int target) {
     for (int i = 0, p = 0; i < n; i++)
       if (i != source && i != target) list[p++] = i;
     height[source] = n;
-    excess[source] = LLONG_MAX / 2;
+    excess[source] = INF;
     for (int i = 0; i < n; i++) push(source, i);
     int p = 0;
@@ -65,7 +66,7 @@ struct PushRelabel {
       } else p++;
     }
 
-    ll maxflow = 0L;
+    long long maxflow = 0;
     for (int i = 0; i < n; i++) maxflow += flow[source][i];
     return maxflow;
   }
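
For reference, a minimal usage sketch of the PushRelabel struct after the switch from fixed-size arrays to vectors (values chosen purely for illustration, not part of the commit):

    PushRelabel pr(4);               // nodes 0..3, 0 = source, 3 = sink
    pr.addEdge(0, 1, 3);
    pr.addEdge(1, 3, 2);
    pr.addEdge(0, 2, 2);
    pr.addEdge(2, 3, 4);
    long long f = pr.maxFlow(0, 3);  // f == 4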
diff --git a/graph/pushRelabel2.cpp b/graph/pushRelabel2.cpp
new file mode 100644
index 0000000..8b5f0c6
--- /dev/null
+++ b/graph/pushRelabel2.cpp
@@ -0,0 +1,109 @@
+constexpr ll inf = 1ll<<60;
+
+struct edge {
+  int from, to;
+  ll f, c;
+};
+
+vector<edge> edges;
+vector<vector<int>> adjlist, llist;
+vector<int> height, ccount, que;
+vector<ll> excess;
+vector<list<int>> dlist;
+vector<list<int>::iterator> iter;
+int highest, highestActive;
+
+void addEdge(int from, int to, ll c) {
+  adjlist[from].push_back(edges.size());
+  edges.push_back({from, to, 0, c});
+  adjlist[to].push_back(edges.size());
+  edges.push_back({to, from, 0, 0});
+}
+
+void globalRelabel(int n, int t) {
+  height.assign(n, n);
+  height[t] = 0;
+  ccount.assign(n, 0);
+  que.assign(n+1, 0);
+  int qh = 0, qt = 0;
+  for (que[qt++] = t; qh < qt;) {
+    int u = que[qh++], h = height[u] + 1;
+    for (int id : adjlist[u]) {
+      if (height[edges[id].to] == n && edges[id ^ 1].c - edges[id ^ 1].f > 0) {
+        ccount[height[edges[id].to] = h]++;
+        que[qt++] = edges[id].to;
+  }}}
+  llist.assign(n+1, {});
+  dlist.assign(n+1, {});
+  for (int u = 0; u < n; u++) {
+    if (height[u] < n) {
+      iter[u] = dlist[height[u]].insert(dlist[height[u]].begin(), u);
+      if (excess[u] > 0) llist[height[u]].push_back(u);
+  }}
+  highest = highestActive = height[que[qt - 1]];
+}
+
+void push(int u, int id) {
+  int v = edges[id].to;
+  ll df = min(excess[u], edges[id].c - edges[id].f);
+  edges[id].f += df;
+  edges[id^1].f -= df;
+  excess[u] -= df;
+  excess[v] += df;
+  if (0 < excess[v] && excess[v] <= df) llist[height[v]].push_back(v);
+}
+
+void discharge(int n, int u) {
+  int nh = n;
+  for (int id : adjlist[u]) {
+    if (edges[id].c - edges[id].f > 0) {
+      if (height[u] == height[edges[id].to] + 1) {
+        push(u, id);
+        if (!excess[u]) return;
+      } else {
+        nh = min(nh, height[edges[id].to] + 1);
+  }}}
+  int h = height[u];
+  if (ccount[h] == 1) {
+    for (int i = h; i <= highest; i++) {
+      for (auto p : dlist[i]) --ccount[height[p]], height[p] = n;
+      dlist[i].clear();
+    }
+    highest = h - 1;
+  } else {
+    --ccount[h], iter[u] = dlist[h].erase(iter[u]), height[u] = nh;
+    if (nh == n) return;
+    ++ccount[nh], iter[u] = dlist[nh].insert(dlist[nh].begin(), u);
+    highest = max(highest, highestActive = nh);
+    llist[nh].push_back(u);
+}}
+
+ll maxFlow(int s, int t) {
+  int n = adjlist.size();
+  llist.assign(n + 1, {});
+  dlist.assign(n + 1, {});
+  highestActive = highest = 0;
+  height.assign(n, 0);
+  height[s] = n;
+  iter.resize(n);
+  for (int i = 0; i < n; i++) {
+    if (i != s) iter[i] = dlist[height[i]].insert(dlist[height[i]].begin(), i);
+  }
+  ccount.assign(n, 0);
+  ccount[0] = n-1;
+  excess.assign(n, 0);
+  excess[s] = inf;
+  excess[t] = -inf;
+  for (int id : adjlist[s]) push(s, id);
+  globalRelabel(n, t);
+  while (highestActive >= 0) {
+    if (llist[highestActive].empty()) {
+      highestActive--;
+      continue;
+    }
+    int u = llist[highestActive].back();
+    llist[highestActive].pop_back();
+    discharge(n, u);
+  }
+  return excess[t] + inf;
+}
\ No newline at end of file
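
pushRelabel2.cpp (highest-label push-relabel with a global relabeling pass) works with free functions over global state; the caller is assumed to size adjlist and clear edges before adding edges, since no init helper is shown. A sketch on the same toy network as above, illustrative only:

    adjlist.assign(4, {});
    edges.clear();
    addEdge(0, 1, 3); addEdge(1, 3, 2);
    addEdge(0, 2, 2); addEdge(2, 3, 4);
    ll f = maxFlow(0, 3);            // f == 4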
diff --git a/graph/pushRelabel3.cpp b/graph/pushRelabel3.cpp
new file mode 100644
index 0000000..d4d2e67
--- /dev/null
+++ b/graph/pushRelabel3.cpp
@@ -0,0 +1,66 @@
+struct edge {
+  int from, to;
+  ll f, c;
+};
+
+vector<edge> edges;
+vector<vector<int>> adjlist, hs;
+vector<ll> ec;
+vector<int> cur, H;
+
+void addEdge(int from, int to, ll c) {
+  adjlist[from].push_back(sz(edges));
+  edges.push_back({from, to, 0, c});
+  adjlist[to].push_back(sz(edges));
+  edges.push_back({to, from, 0, 0});
+}
+
+void addFlow(int id, ll f) {
+  if (ec[edges[id].to] == 0 && f > 0)
+    hs[H[edges[id].to]].push_back(edges[id].to);
+  edges[id].f += f;
+  edges[id^1].f -= f;
+  ec[edges[id].to] += f;
+  ec[edges[id].from] -= f;
+}
+
+ll maxFlow(int s, int t) {
+  int n = sz(adjlist);
+  hs.assign(2*n, {});
+  ec.assign(n, 0);
+  cur.assign(n, 0);
+  H.assign(n, 0);
+  H[s] = n;
+  ec[t] = 1; //never set t to active...
+  vector<int> co(2*n);
+  co[0] = n - 1;
+  for (int id : adjlist[s]) addFlow(id, edges[id].c);
+  for (int hi = 0;;) {
+    while (hs[hi].empty()) if (!hi--) return -ec[s];
+    int u = hs[hi].back();
+    hs[hi].pop_back();
+    while (ec[u] > 0) {
+      if (cur[u] == sz(adjlist[u])) {
+        H[u] = 2*n;
+        for (int i = 0; i < sz(adjlist[u]); i++) {
+          int id = adjlist[u][i];
+          if (edges[id].c - edges[id].f > 0 &&
+              H[u] > H[edges[id].to] + 1) {
+            H[u] = H[edges[id].to] + 1;
+            cur[u] = i;
+        }}
+        co[H[u]]++;
+        if (!--co[hi] && hi < n) {
+          for (int i = 0; i < n; i++) {
+            if (hi < H[i] && H[i] < n) {
+              co[H[i]]--;
+              H[i] = n + 1;
+        }}}
+        hi = H[u];
+      } else {
+        auto e = edges[adjlist[u][cur[u]]];
+        if (e.c - e.f > 0 && H[u] == H[e.to] + 1) {
+          addFlow(adjlist[u][cur[u]], min(ec[u], e.c - e.f));
+        } else {
+          cur[u]++;
+}}}}}
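
pushRelabel3.cpp exposes the same addEdge/maxFlow interface as pushRelabel2.cpp. Assuming the usual push-relabel property carries over to this variant (after maxFlow() every vertex that can no longer reach the sink in the residual graph ends up with a label of at least n), the source side of a minimum cut could be read off the final labels with a hypothetical helper such as:

    // hypothetical helper, not part of the committed file
    bool leftOfMinCut(int v) { return H[v] >= sz(adjlist); }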
diff --git a/graph/scc.cpp b/graph/scc.cpp
index 9eb0881..b21d544 100644
--- a/graph/scc.cpp
+++ b/graph/scc.cpp
@@ -1,17 +1,17 @@
-// Runtime: O(|V|+|E|)
+vector<vector<int>> adjlist;
+
 int counter, sccCounter;
-vector<bool> visited, inStack;
-vector< vector<int> > adjlist;
-vector<int> d, low, sccs; // sccs contains the index of the SCC for each node.
-stack<int> s;
+vector<bool> inStack;
+vector<vector<int>> sccs;
+// idx contains the index of the SCC for each node.
+vector<int> d, low, idx, s;
 
 void visit(int v) {
-  visited[v] = true;
   d[v] = low[v] = counter++;
-  s.push(v); inStack[v] = true;
+  s.push_back(v); inStack[v] = true;
 
   for (auto u : adjlist[v]) {
-    if (!visited[u]) {
+    if (d[u] < 0) {
       visit(u);
       low[v] = min(low[v], low[u]);
     } else if (inStack[u]) {
@@ -19,23 +19,23 @@ void visit(int v) {
   }}
 
   if (d[v] == low[v]) {
+    sccs.push_back({});
     int u;
     do {
-      u = s.top(); s.pop(); inStack[u] = false;
-      sccs[u] = sccCounter;
+      u = s.back(); s.pop_back(); inStack[u] = false;
+      idx[u] = sccCounter;
+      sccs[sccCounter].push_back(u);
     } while (u != v);
     sccCounter++;
 }}
 
 void scc() {
-  visited.assign(adjlist.size(), false);
-  d.assign(adjlist.size(), -1);
-  low.assign(adjlist.size(), -1);
-  inStack.assign(adjlist.size(), false);
-  sccs.resize(adjlist.size(), -1);
+  inStack.assign(sz(adjlist), false);
+  d.assign(sz(adjlist), -1);
+  low.assign(sz(adjlist), -1);
+  idx.assign(sz(adjlist), -1);
   counter = sccCounter = 0;
-  for (int i = 0; i < (int)adjlist.size(); i++) {
-    if (!visited[i]) {
-      visit(i);
-}}}
+  for (int i = 0; i < sz(adjlist); i++) {
+    if (d[i] < 0) visit(i);
+}}
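
A short usage sketch of the reworked SCC interface: the caller fills adjlist, calls scc(), then reads idx (component index per node) and sccs (members per component). Illustrative only, not part of the commit:

    adjlist.assign(3, {});
    adjlist[0] = {1}; adjlist[1] = {2}; adjlist[2] = {0};  // a single 3-cycle
    scc();
    // now sccCounter == 1, idx[0] == idx[1] == idx[2] == 0, sz(sccs[0]) == 3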
diff --git a/graph/stoerWagner.cpp b/graph/stoerWagner.cpp
new file mode 100644
index 0000000..899cb3b
--- /dev/null
+++ b/graph/stoerWagner.cpp
@@ -0,0 +1,53 @@
+struct edge {
+  int from, to;
+  ll cap;
+};
+
+vector<vector<edge>> adjlist, tmp;
+vector<bool> erased;
+
+void merge(int a, int b) {
+  tmp[a].insert(tmp[a].end(), all(tmp[b]));
+  tmp[b].clear();
+  erased[b] = true;
+  for (auto& v : tmp) {
+    for (auto& e : v) {
+      if (e.from == b) e.from = a;
+      if (e.to == b) e.to = a;
+}}}
+
+ll stoer_wagner() {
+  ll res = INF;
+  tmp = adjlist;
+  erased.assign(sz(tmp), false);
+  for (int i = 1; i < sz(tmp); i++) {
+    int s = 0;
+    while (erased[s]) s++;
+    priority_queue<pair<ll, int>> pq;
+    pq.push({0, s});
+    vector<ll> con(sz(tmp));
+    ll cur = 0;
+    vector<pair<ll, int>> state;
+    while (!pq.empty()) {
+      int c = pq.top().second;
+      pq.pop();
+      if (con[c] < 0) continue; //already seen
+      con[c] = -1;
+      for (auto e : tmp[c]) {
+        if (con[e.to] >= 0) { //add edge to cut
+          con[e.to] += e.cap;
+          pq.push({con[e.to], e.to});
+          cur += e.cap;
+        } else if (e.to != c) { //remove edge from cut
+          cur -= e.cap;
+      }}
+      state.push_back({cur, c});
+    }
+    int t = state.back().second;
+    state.pop_back();
+    if (state.empty()) return 0; //graph is not connected?!
+    merge(state.back().second, t);
+    res = min(res, state.back().first);
+  }
+  return res;
+}
diff --git a/graph/treeIsomorphism.cpp b/graph/treeIsomorphism.cpp
new file mode 100644
index 0000000..f3e147b
--- /dev/null
+++ b/graph/treeIsomorphism.cpp
@@ -0,0 +1,41 @@
+vector<vector<vector<int>>>
+getTreeLabel(const vector<vector<int>>& adj, int root) {
+  vector<vector<int>> level = {{root}};
+  vector<int> dist(sz(adj), -1);
+  dist[root] = 0;
+  queue<int> q;
+  q.push(root);
+  while (!q.empty()) {
+    int c = q.front();
+    q.pop();
+    for (int n : adj[c]) {
+      if (dist[n] < 0) {
+        dist[n] = dist[c] + 1;
+        if (sz(level) <= dist[n]) level.push_back({});
+        level[dist[n]].push_back(n);
+        q.push(n);
+  }}}
+
+  vector<vector<vector<int>>> levelLabels(sz(level));
+  vector<int> shortLabel(sz(adj));
+  for (int l = sz(level) - 1; l >= 0; l--) {
+    vector<pair<vector<int>, int>> tmpLabels;
+    for (int v : level[l]) {
+      vector<int> label;
+      for (int n : adj[v]) {
+        if (dist[n] > dist[v]) {
+          label.push_back(shortLabel[n]);
+      }}
+      sort(all(label));
+      tmpLabels.push_back({label, v});
+    }
+    sort(all(tmpLabels));
+    vector<int>& last = tmpLabels[0].first;
+    int curId = 0;
+    for (auto& e : tmpLabels) {
+      levelLabels[l].push_back(e.first);
+      if (e.first != last) curId++, last = e.first;
+      shortLabel[e.second] = curId;
+  }}
+  return levelLabels;
+}
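
getTreeLabel only computes the canonical per-level labels; checking rooted-tree isomorphism on top of it is an assumed usage, sketched here: two rooted trees should be isomorphic exactly when their label structures compare equal.

    // hypothetical wrapper, not part of the committed file
    bool isomorphicRooted(const vector<vector<int>>& adjA, int rootA,
                          const vector<vector<int>>& adjB, int rootB) {
      return getTreeLabel(adjA, rootA) == getTreeLabel(adjB, rootB);
    }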
