Diffstat (limited to 'content'): 130 files changed, 791 insertions, 730 deletions
diff --git a/content/datastructures/datastructures.tex b/content/datastructures/datastructures.tex index c9f3d2a..c4bd312 100644 --- a/content/datastructures/datastructures.tex +++ b/content/datastructures/datastructures.tex @@ -10,7 +10,7 @@ \subsubsection{Lazy Propagation} Assignment modifications, sum queries \\ - \method{lower\_bound}{erster Index in $[l, r)$ $\geq$ x (erfordert max-combine)}{\log(n)} + \method{binary\_search}{kleinstes $x$ in $[l, r]$ mit $f(\text{query}(l, x))$ (monoton in $x$)}{\log(n)} \sourcecode{datastructures/lazyPropagation.cpp} \end{algorithm} @@ -20,6 +20,8 @@ \method{kth}{sort $[l, r)[k]$}{\log(\Sigma)} \method{countSmaller}{Anzahl elemente in $[l, r)$ kleiner als $k$}{\log(\Sigma)} \end{methods} + $\Sigma$ ist die Gr\"o\ss e des Eingabebereichs, d.h. + $\mathit{max} - \mathit{min}$. \sourcecode{datastructures/waveletTree.cpp} \end{algorithm} \columnbreak @@ -27,15 +29,15 @@ \begin{algorithm}{Fenwick Tree} \begin{methods} \method{init}{baut den Baum auf}{n\*\log(n)} - \method{prefix\_sum}{summe von $[0, i]$}{\log(n)} + \method{prefix\_sum}{summe von $[0, i)$}{\log(n)} \method{update}{addiert ein Delta zu einem Element}{\log(n)} \end{methods} \sourcecode{datastructures/fenwickTree.cpp} \begin{methods} \method{init}{baut den Baum auf}{n\*\log(n)} - \method{prefix\_sum}{summe von [$0, i]$}{\log(n)} - \method{update}{addiert ein Delta zu allen Elementen $[l, r)$. $l\leq r$!}{\log(n)} + \method{prefix\_sum}{summe von $[0, i)$}{\log(n)} + \method{update}{addiert ein Delta zu allen Elementen $[l, r)$}{\log(n)} \end{methods} \sourcecode{datastructures/fenwickTree2.cpp} \end{algorithm} @@ -56,7 +58,7 @@ \begin{algorithm}{Range Minimum Query} \begin{methods} \method{init}{baut Struktur auf}{n\*\log(n)} - \method{queryIdempotent}{Index des Minimums in $[l, r)$. $l<r$!}{1} + \method{query}{Index des Minimums in $[l, r)$}{1} \end{methods} \begin{itemize} \item \code{better}-Funktion muss idempotent sein! @@ -64,6 +66,14 @@ \sourcecode{datastructures/sparseTable.cpp} \end{algorithm} +\begin{algorithm}[optional]{Range Aggregate Query} + \begin{methods} + \method{init}{baut Struktur auf}{n\*\log(n)} + \method{query}{Aggregat über $[l,r)$}{1} + \end{methods} + \sourcecode{datastructures/sparseTableDisjoint.cpp} +\end{algorithm} + \begin{algorithm}{STL-Bitset} \sourcecode{datastructures/bitset.cpp} \end{algorithm} @@ -80,30 +90,43 @@ \end{methods} \sourcecode{datastructures/LCT.cpp} \end{algorithm} -\clearpage +\columnbreak -\begin{algorithm}{Lichao} - \sourcecode{datastructures/lichao.cpp} +\begin{algorithm}{Lower Envelope (Convex Hull Optimization)} + Um aus einem Lower Envelope einen Upper Envelope zu machen (oder + umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren. + \subsubsection{Monotonic} + \begin{methods} + \method{add}{add line $mx + b$, $m$ is decreasing}{1} + \method{query}{minimum value at $x$, $x$ is increasing}{1} + \end{methods} + \sourcecode{datastructures/monotonicConvexHull.cpp} + \subsubsection{Dynamic} + \begin{methods} + \method{add}{add line $mx + b$}{\log(n)} + \method{query}{minimum value at $x$}{\log(n)} + \end{methods} + \sourcecode{datastructures/dynamicConvexHull.cpp} + \subsubsection{Li Chao Tree} + Every pair of functions has at most one intersection. 
+ + \begin{methods} + \method{insert}{add function}{\log(|xs|)} + \method{query}{minimum value at $x$, $x \in xs$}{\log(|xs|)} + \end{methods} + \sourcecode{datastructures/lichao.cpp} \end{algorithm} \begin{algorithm}{Policy Based Data Structures} - \textbf{Wichtig:} Verwende \code{p.swap(p2)} anstatt \code{swap(p, p2)}! - \sourcecode{datastructures/stlPriorityQueue.cpp} - \columnbreak \sourcecode{datastructures/pbds.cpp} \end{algorithm} -\begin{algorithm}{Lower/Upper Envelope (Convex Hull Optimization)} - Um aus einem lower envelope einen upper envelope zu machen (oder umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren. - \sourcecode{datastructures/monotonicConvexHull.cpp} - \sourcecode{datastructures/dynamicConvexHull.cpp} -\end{algorithm} - \begin{algorithm}{Union-Find} \begin{methods} \method{init}{legt $n$ einzelne Unions an}{n} \method{findSet}{findet den Repräsentanten}{\log(n)} \method{unionSets}{vereint 2 Mengen}{\log(n)} + \method{size}{zählt Elemente in Menge, die $a$ enthält}{\log(n)} \method{m\*findSet + n\*unionSets}{Folge von Befehlen}{n+m\*\alpha(n)} \end{methods} \sourcecode{datastructures/unionFind.cpp} diff --git a/content/datastructures/dynamicConvexHull.cpp b/content/datastructures/dynamicConvexHull.cpp index 63e0e13..36ef6f5 100644 --- a/content/datastructures/dynamicConvexHull.cpp +++ b/content/datastructures/dynamicConvexHull.cpp @@ -1,15 +1,15 @@ struct Line { mutable ll m, c, p; - bool operator<(const Line& o) const {return m < o.m;} - bool operator<(ll x) const {return p < x;} + bool operator<(const Line& o) const { return m < o.m; } + bool operator<(ll x) const { return p < x; } }; struct HullDynamic : multiset<Line, less<>> { // max über Geraden // (for doubles, use INF = 1/.0, div(a,c) = a/c) - ll div(ll a, ll c) {return a / c - ((a ^ c) < 0 && a % c);} + ll div(ll a, ll c) { return a / c - ((a ^ c) < 0 && a % c); } bool isect(iterator x, iterator y) { - if (y == end()) {x->p = INF; return false;} + if (y == end()) { x->p = INF; return false; } if (x->m == y->m) x->p = x->c > y->c ? 
INF : -INF; else x->p = div(y->c - x->c, x->m - y->m); return x->p >= y->p; @@ -19,13 +19,11 @@ struct HullDynamic : multiset<Line, less<>> { // max über Geraden auto x = insert({m, c, 0}); while (isect(x, next(x))) erase(next(x)); if (x != begin()) { - x--; - if (isect(x, next(x))) { - erase(next(x)); - isect(x, next(x)); - }} + --x; + while (isect(x, next(x))) erase(next(x)); + } while (x != begin() && prev(x)->p >= x->p) { - x--; + --x; isect(x, erase(next(x))); }} diff --git a/content/datastructures/fenwickTree.cpp b/content/datastructures/fenwickTree.cpp index eb5cd73..7013613 100644 --- a/content/datastructures/fenwickTree.cpp +++ b/content/datastructures/fenwickTree.cpp @@ -1,7 +1,7 @@ vector<ll> tree; void update(int i, ll val) { - for (i++; i < sz(tree); i += i & -i) tree[i] += val; + for (i++; i < ssize(tree); i += i & -i) tree[i] += val; } void init(int n) { @@ -10,6 +10,6 @@ void init(int n) { ll prefix_sum(int i) { ll sum = 0; - for (i++; i > 0; i -= i & -i) sum += tree[i]; + for (; i > 0; i &= i-1) sum += tree[i]; return sum; } diff --git a/content/datastructures/fenwickTree2.cpp b/content/datastructures/fenwickTree2.cpp index 9384e3c..7fcdbb9 100644 --- a/content/datastructures/fenwickTree2.cpp +++ b/content/datastructures/fenwickTree2.cpp @@ -1,21 +1,21 @@ vector<ll> add, mul; void update(int l, int r, ll val) { - for (int tl = l + 1; tl < sz(add); tl += tl & -tl) + for (int tl = l + 1; tl < ssize(add); tl += tl & -tl) add[tl] += val, mul[tl] -= val * l; - for (int tr = r + 1; tr < sz(add); tr += tr & -tr) + for (int tr = r + 1; tr < ssize(add); tr += tr & -tr) add[tr] -= val, mul[tr] += val * r; } -void init(vector<ll>& v) { - mul.assign(sz(v) + 1, 0); - add.assign(sz(v) + 1, 0); - for(int i = 0; i < sz(v); i++) update(i, i + 1, v[i]); +void init(vector<ll> &v) { + mul.assign(size(v) + 1, 0); + add.assign(size(v) + 1, 0); + for(int i = 0; i < ssize(v); i++) update(i, i + 1, v[i]); } ll prefix_sum(int i) { - ll res = 0; i++; - for (int ti = i; ti > 0; ti -= ti & -ti) + ll res = 0; + for (int ti = i; ti > 0; ti &= ti-1) res += add[ti] * i + mul[ti]; return res; } diff --git a/content/datastructures/lazyPropagation.cpp b/content/datastructures/lazyPropagation.cpp index ab91364..a5be822 100644 --- a/content/datastructures/lazyPropagation.cpp +++ b/content/datastructures/lazyPropagation.cpp @@ -1,23 +1,22 @@ struct SegTree { using T = ll; using U = ll; - int n; static constexpr T E = 0; // Neutral element for combine - static constexpr U UF = INF; // Unused value by updates - vector<T> tree; + static constexpr U UF = 1e18; // Unused value by updates + int n; + vector<T> tree; vector<U> lazy; int h; - vector<U> lazy; - vector<int> k; // size of segments (optional) + vector<ll> k; // size of segments (optional) - SegTree(const vector<T>& a) : n(sz(a) + 1), tree(2 * n, E), + SegTree(const vector<T>& a) : n(ssize(a) + 1), tree(2 * n, E), //SegTree(int size, T def = E) : n(size + 1), tree(2 * n, def), - h(__lg(2 * n)), lazy(n, UF), k(2 * n, 1) { - copy(all(a), tree.begin() + n); + lazy(n, UF), h(__lg(2 * n)), k(2 * n, 1) { + ranges::copy(a, tree.begin() + n); for (int i = n - 1; i > 0; i--) { k[i] = 2 * k[2 * i]; tree[i] = comb(tree[2 * i], tree[2 * i + 1]); }} - T comb(T a, T b) {return a + b;} // Modify this + E + T comb(T a, T b) { return a + b; } // Modify this + E void apply(int i, U val) { // And this + UF tree[i] = val * k[i]; @@ -44,17 +43,17 @@ struct SegTree { void update(int l, int r, U val) { l += n, r += n; int l0 = l, r0 = r; - push(l0), push(r0 - 1); + push(l0), 
push(r0); for (; l < r; l /= 2, r /= 2) { if (l&1) apply(l++, val); if (r&1) apply(--r, val); } - build(l0), build(r0 - 1); + build(l0), build(r0); } T query(int l, int r) { l += n, r += n; - push(l), push(r - 1); + push(l), push(r); T resL = E, resR = E; for (; l < r; l /= 2, r /= 2) { if (l&1) resL = comb(resL, tree[l++]); @@ -64,21 +63,23 @@ struct SegTree { } // Optional: - int lower_bound(int l, int r, T x) { + int binary_search(int l, int r, auto &&f) { + if (f(E)) return l; l += n, r += n; - push(l), push(r - 1); + push(l), push(r); int a[64] = {}, lp = 0, rp = 64; for (; l < r; l /= 2, r /= 2) { if (l&1) a[lp++] = l++; if (r&1) a[--rp] = --r; } - for (int i : a) if (i != 0 && tree[i] >= x) { // Modify this + T x = E, y = x; + for (int i : a) if (i != 0 && f(x = comb(y = x, tree[i]))) { while (i < n) { push_down(i); - if (tree[2 * i] >= x) i = 2 * i; // And this - else i = 2 * i + 1; + if (f(x = comb(y, tree[2*i]))) i = 2 * i; + else i = 2 * i + 1, y = x; } - return i - n; + return i - n + 1; } return -1; } diff --git a/content/datastructures/lichao.cpp b/content/datastructures/lichao.cpp index 1318ca7..bdbf5f9 100644 --- a/content/datastructures/lichao.cpp +++ b/content/datastructures/lichao.cpp @@ -1,9 +1,10 @@ vector<ll> xs; // IMPORTANT: Initialize before constructing! -int findX(int i) {return lower_bound(all(xs), i) - begin(xs);} +int findX(int i) { + return ranges::lower_bound(xs, i) - begin(xs); } -struct Fun { // Default: Linear function. Change as needed. +struct Fun { // Default: Linear function. Change as needed. ll m, c; - ll operator()(int x) {return m*xs[x] + c;} + ll operator()(int x) { return m*xs[x] + c; } }; // Default: Computes min. Change lines with comment for max. @@ -11,18 +12,18 @@ struct Lichao { static constexpr Fun id = {0, INF}; // {0, -INF} int n, cap; vector<Fun> seg; - Lichao() : n(sz(xs)), cap(2 << __lg(n)), seg(2 * cap, id) {} - + Lichao() : n(ssize(xs)), cap(2 << __lg(n)), seg(2 * cap, id) {} + void _insert(Fun f, int l, int r, int i) { while (i < 2 * cap) { int m = (l+r)/2; - if (m >= n) {r = m; i = 2*i; continue;} + if (m >= n) { r = m; i = 2*i; continue; } Fun &g = seg[i]; if (f(m) < g(m)) swap(f, g); // > if (f(l) < g(l)) r = m, i = 2*i; // > else l = m, i = 2*i+1; }} - void insert(Fun f) {_insert(f, 0, cap, 1);} + void insert(Fun f) { _insert(f, 0, cap, 1); } void _segmentInsert(Fun f, int l, int r, int a, int b, int i) { if (l <= a && b <= r) _insert(f, a, b, i); @@ -42,5 +43,5 @@ struct Lichao { } return ans; } - ll query(ll x) {return _query(findX(x));} + ll query(ll x) { return _query(findX(x)); } }; diff --git a/content/datastructures/monotonicConvexHull.cpp b/content/datastructures/monotonicConvexHull.cpp index f1721ae..295acc4 100644 --- a/content/datastructures/monotonicConvexHull.cpp +++ b/content/datastructures/monotonicConvexHull.cpp @@ -1,27 +1,25 @@ -// Min über Geraden mit MONOTONEN Inserts UND Queries. Jede neue -// Gerade hat kleineres pair(m, c) als alle vorherigen. 
-struct Line { - ll m, c; - ll operator()(ll x) {return m*x+c;} -}; +struct Envelope { + struct Line { + ll m, b; + ll operator()(ll x) { return m*x+b; } + }; -vector<Line> ls; -ll ptr = 0; + vector<Line> ls; + int ptr = 0; -bool bad(Line l1, Line l2, Line l3) { - return (l3.c-l1.c)*(l1.m-l2.m) < (l2.c-l1.c)*(l1.m-l3.m); -} + static bool bad(Line l1, Line l2, Line l3) { + return (l3.b-l1.b)*(l1.m-l2.m) < (l2.b-l1.b)*(l1.m-l3.m); + } -void add(ll m, ll c) { // m fallend, Laufzeit O(1) amortisiert - while (sz(ls) > 1 && bad(ls.end()[-2], ls.end()[-1], {m, c})) { - ls.pop_back(); + void add(ll m, ll b) { + while (ssize(ls) > 1 + && bad(ls.end()[-2], ls.back(), {m,b})) ls.pop_back(); + ls.push_back({m, b}); + ptr = min(ptr, (int)ssize(ls) - 1); } - ls.push_back({m, c}); - ptr = min(ptr, sz(ls) - 1); -} -ll query(ll x) { // x >= letztes x, Laufzeit: O(1) amortisiert - ptr = min(ptr, sz(ls) - 1); - while (ptr + 1 < sz(ls) && ls[ptr + 1](x) < ls[ptr](x)) ptr++; - return ls[ptr](x); -}
\ No newline at end of file + ll query(ll x) { + while (ptr < ssize(ls)-1 && ls[ptr+1](x) < ls[ptr](x)) ptr++; + return ls[ptr](x); + } +}; diff --git a/content/datastructures/pbds.cpp b/content/datastructures/pbds.cpp index de0ace6..734bf91 100644 --- a/content/datastructures/pbds.cpp +++ b/content/datastructures/pbds.cpp @@ -1,14 +1,22 @@ +#include <ext/pb_ds/priority_queue.hpp> +template<typename T> +using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>> +auto it = pq.push(5); // O(1) +pq.modify(it, 6); // O(log n) +pq.erase(it); // O(log n) +pq.join(pq2); // O(1) +pq.swap(pq2); // O(1) + #include <ext/pb_ds/assoc_container.hpp> using namespace __gnu_pbds; template<typename T> using Tree = tree<T, null_type, less<T>, rb_tree_tag, tree_order_statistics_node_update>; -// T.order_of_key(x): number of elements strictly less than x -// *T.find_by_order(k): k-th element +T.order_of_key(x); // number of elements strictly less than x +auto it = T.find_by_order(k); // k-th element constexpr uint64_t RNG = ll(2e18 * acos(-1)) | 199; // random odd -template<typename T> -struct chash { +template<typename T> struct chash { size_t operator()(T o) const { return __builtin_bswap64(hash<T>()(o) * RNG); }}; diff --git a/content/datastructures/persistent.cpp b/content/datastructures/persistent.cpp index f26680d..ed2f891 100644 --- a/content/datastructures/persistent.cpp +++ b/content/datastructures/persistent.cpp @@ -1,18 +1,18 @@ -template<typename T>
-struct persistent {
- int& time;
- vector<pair<int, T>> data;
-
- persistent(int& time, T value = {})
- : time(time), data(1, {2*time, value}) {}
-
- T get(int t) {
- return prev(upper_bound(all(data),pair{2*t+1, T{}}))->second;
- }
-
- int set(T value) {
- time++;
- data.push_back({2*time, value});
- return time;
- }
-};
+template<typename T> +struct persistent { + int& time; + vector<pair<int, T>> data; + + persistent(int& time, T value = {}) + : time(time), data(1, {2*time, value}) {} + + T get(int t) { + return ranges::upper_bound(data,pair{2*t+1, T{}})[-1].second; + } + + int set(T value) { + time++; + data.push_back({2*time, value}); + return time; + } +}; diff --git a/content/datastructures/persistentArray.cpp b/content/datastructures/persistentArray.cpp index 8326700..903bd0e 100644 --- a/content/datastructures/persistentArray.cpp +++ b/content/datastructures/persistentArray.cpp @@ -1,24 +1,24 @@ -template<typename T>
-struct persistentArray {
- int time;
- vector<persistent<T>> data;
- vector<pair<int, int>> mods;
-
- persistentArray(int n, T value = {})
- : time(0), data(n, {time, value}) {}
-
- T get(int p, int t) {return data[p].get(t);}
-
- int set(int p, T value) {
- mods.push_back({p, data[p].set(value)});
- return mods.back().second;
- }
-
- void reset(int t) {
- while (!mods.empty() && mods.back().second > t) {
- data[mods.back().first].data.pop_back();
- mods.pop_back();
- }
- time = t;
- }
-};
+template<typename T> +struct persistentArray { + int time; + vector<persistent<T>> data; + vector<pair<int, int>> mods; + + persistentArray(int n, T value = {}) + : time(0), data(n, {time, value}) {} + + T get(int p, int t) { return data[p].get(t); } + + int set(int p, T value) { + mods.push_back({p, data[p].set(value)}); + return mods.back().second; + } + + void reset(int t) { + while (!mods.empty() && mods.back().second > t) { + data[mods.back().first].data.pop_back(); + mods.pop_back(); + } + time = t; + } +}; diff --git a/content/datastructures/segmentTree.cpp b/content/datastructures/segmentTree.cpp index 6b69d0b..1fbf886 100644 --- a/content/datastructures/segmentTree.cpp +++ b/content/datastructures/segmentTree.cpp @@ -4,14 +4,15 @@ struct SegTree { vector<T> tree; static constexpr T E = 0; // Neutral element for combine - SegTree(vector<T>& a) : n(sz(a)), tree(2 * n) { - //SegTree(int size, T val = E) : n(size), tree(2 * n, val) { - copy(all(a), tree.begin() + n); + SegTree(vector<T>& a) : n(ssize(a)), tree(2 * n, E) { + ranges::copy(a, tree.begin() + n); + //SegTree(int size, T val = E) : n(size), tree(2 * n, E) { + // fill(tree.begin() + n, tree.end(), val); for (int i = n - 1; i > 0; i--) { // remove for range update tree[i] = comb(tree[2 * i], tree[2 * i + 1]); }} - T comb(T a, T b) {return a + b;} // modify this + neutral + T comb(T a, T b) { return a + b; } // modify this + neutral void update(int i, T val) { tree[i += n] = val; // apply update code diff --git a/content/datastructures/sparseTable.cpp b/content/datastructures/sparseTable.cpp index b3f946e..5455ef5 100644 --- a/content/datastructures/sparseTable.cpp +++ b/content/datastructures/sparseTable.cpp @@ -6,17 +6,17 @@ struct SparseTable { return a[lidx] <= a[ridx] ? lidx : ridx; } - void init(vector<ll>* vec) { - int n = sz(*vec); - a = vec->data(); + void init(vector<ll> &vec) { + int n = ssize(vec); + a = vec.data(); st.assign(__lg(n) + 1, vector<int>(n)); - iota(all(st[0]), 0); + iota(begin(st[0]), end(st[0]), 0); for (int j = 0; (2 << j) <= n; j++) { for (int i = 0; i + (2 << j) <= n; i++) { st[j + 1][i] = better(st[j][i] , st[j][i + (1 << j)]); }}} - int queryIdempotent(int l, int r) { + int query(int l, int r) { if (r <= l) return -1; int j = __lg(r - l); //31 - builtin_clz(r - l); return better(st[j][l] , st[j][r - (1 << j)]); diff --git a/content/datastructures/sparseTableDisjoint.cpp b/content/datastructures/sparseTableDisjoint.cpp index 55165d4..bcf6b2e 100644 --- a/content/datastructures/sparseTableDisjoint.cpp +++ b/content/datastructures/sparseTableDisjoint.cpp @@ -7,16 +7,16 @@ struct DisjointST { return x + y; } - void init(vector<ll>* vec) { - int n = sz(*vec); - a = vec->data(); + void init(vector<ll> &vec) { + int n = ssize(vec); + a = vec.data(); dst.assign(__lg(n) + 1, vector<ll>(n + 1, neutral)); for (int h = 0, l = 1; l <= n; h++, l *= 2) { for (int c = l; c < n + l; c += 2 * l) { for (int i = c; i < min(n, c + l); i++) - dst[h][i + 1] = combine(dst[h][i], vec->at(i)); + dst[h][i + 1] = combine(dst[h][i], vec[i]); for (int i = min(n, c); i > c - l; i--) - dst[h][i - 1] = combine(vec->at(i - 1), dst[h][i]); + dst[h][i - 1] = combine(vec[i - 1], dst[h][i]); }}} ll query(int l, int r) { diff --git a/content/datastructures/stlHashMap.cpp b/content/datastructures/stlHashMap.cpp deleted file mode 100644 index b107dde..0000000 --- a/content/datastructures/stlHashMap.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include <ext/pb_ds/assoc_container.hpp> -using namespace __gnu_pbds; - -template<typename T> -struct 
betterHash { - size_t operator()(T o) const { - size_t h = hash<T>()(o) ^ 42394245; //random value - h = ((h >> 16) ^ h) * 0x45d9f3b; - h = ((h >> 16) ^ h) * 0x45d9f3b; - h = ((h >> 16) ^ h); - return h; -}}; - -template<typename K, typename V, typename H = betterHash<K>> -using hashMap = gp_hash_table<K, V, H>; -template<typename K, typename H = betterHash<K>> -using hashSet = gp_hash_table<K, null_type, H>; diff --git a/content/datastructures/stlPriorityQueue.cpp b/content/datastructures/stlPriorityQueue.cpp deleted file mode 100644 index 32b2455..0000000 --- a/content/datastructures/stlPriorityQueue.cpp +++ /dev/null @@ -1,8 +0,0 @@ -#include <ext/pb_ds/priority_queue.hpp> -template<typename T> -using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>> - -auto it = pq.push(5); -pq.modify(it, 6); -pq.join(pq2); -// push, join are O(1), pop, modify, erase O(log n) amortized diff --git a/content/datastructures/stlTree.cpp b/content/datastructures/stlTree.cpp deleted file mode 100644 index fbb68b9..0000000 --- a/content/datastructures/stlTree.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include <ext/pb_ds/assoc_container.hpp> -#include <ext/pb_ds/tree_policy.hpp> -using namespace std; using namespace __gnu_pbds; -template<typename T> -using Tree = tree<T, null_type, less<T>, rb_tree_tag, - tree_order_statistics_node_update>; - -int main() { - Tree<int> X; - for (int i : {1, 2, 4, 8, 16}) X.insert(i); - *X.find_by_order(3); // => 8 - X.order_of_key(10); // => 4 = min i, mit X[i] >= 10 -} diff --git a/content/datastructures/treap.cpp b/content/datastructures/treap.cpp index c5a60e9..bddfdb4 100644 --- a/content/datastructures/treap.cpp +++ b/content/datastructures/treap.cpp @@ -66,7 +66,7 @@ struct Treap { void insert(int i, ll val) { // and i = val auto [left, right] = split(root, i); treap.emplace_back(val); - left = merge(left, sz(treap) - 1); + left = merge(left, ssize(treap) - 1); root = merge(left, right); } diff --git a/content/datastructures/waveletTree.cpp b/content/datastructures/waveletTree.cpp index 090cdb2..55167b6 100644 --- a/content/datastructures/waveletTree.cpp +++ b/content/datastructures/waveletTree.cpp @@ -1,25 +1,20 @@ struct WaveletTree { - using it = vector<ll>::iterator; - WaveletTree *ln = nullptr, *rn = nullptr; + unique_ptr<WaveletTree> ln, rn; vector<int> b = {0}; ll lo, hi; - WaveletTree(vector<ll> in) : WaveletTree(all(in)) {} - - WaveletTree(it from, it to) : // call above one - lo(*min_element(from, to)), hi(*max_element(from, to) + 1) { + WaveletTree(auto in) : lo(*ranges::min_element(in)), + hi(*ranges::max_element(in) + 1) { ll mid = (lo + hi) / 2; - auto f = [&](ll x) {return x < mid;}; - for (it c = from; c != to; c++) { - b.push_back(b.back() + f(*c)); - } + auto f = [&](ll x) { return x < mid; }; + for (ll x: in) b.push_back(b.back() + f(x)); if (lo + 1 >= hi) return; - it pivot = stable_partition(from, to, f); - ln = new WaveletTree(from, pivot); - rn = new WaveletTree(pivot, to); + auto right = ranges::stable_partition(in, f); + ln = make_unique<WaveletTree>( + ranges::subrange(begin(in), begin(right))); + rn = make_unique<WaveletTree>(right); } - // kth element in sort[l, r) all 0-indexed ll kth(int l, int r, int k) { if (k < 0 || l + k >= r) return -1; if (lo + 1 >= hi) return lo; @@ -28,13 +23,10 @@ struct WaveletTree { else return rn->kth(l-b[l], r-b[r], k-inLeft); } - // count elements in[l, r) smaller than k int countSmaller(int l, int r, ll k) { if (l >= r || k <= lo) return 0; if (hi <= k) return r - l; return ln->countSmaller(b[l], b[r], k) + 
rn->countSmaller(l-b[l], r-b[r], k); } - - ~WaveletTree() {delete ln; delete rn;} }; diff --git a/content/geometry/antipodalPoints.cpp b/content/geometry/antipodalPoints.cpp index 110cc74..b34b175 100644 --- a/content/geometry/antipodalPoints.cpp +++ b/content/geometry/antipodalPoints.cpp @@ -1,12 +1,12 @@ vector<pair<int, int>> antipodalPoints(vector<pt>& h) { - if (sz(h) < 2) return {}; + if (ssize(h) < 2) return {}; vector<pair<int, int>> result; for (int i = 0, j = 1; i < j; i++) { while (true) { result.push_back({i, j}); - if (cross(h[(i + 1) % sz(h)] - h[i], - h[(j + 1) % sz(h)] - h[j]) <= 0) break; - j = (j + 1) % sz(h); + if (cross(h[(i + 1) % ssize(h)] - h[i], + h[(j + 1) % ssize(h)] - h[j]) <= 0) break; + j = (j + 1) % ssize(h); }} return result; } diff --git a/content/geometry/circle.cpp b/content/geometry/circle.cpp index 6789c52..155b55c 100644 --- a/content/geometry/circle.cpp +++ b/content/geometry/circle.cpp @@ -22,7 +22,7 @@ vector<pt> circleRayIntersection(pt center, double r, double c = norm(orig - center) - r * r; double discr = b * b - 4 * a * c; if (discr >= 0) { - //t in [0, 1] => schnitt mit Segment [orig, orig + dir] + //t in [0, 1] => Schnitt mit Segment [orig, orig + dir] double t1 = -(b + sqrt(discr)) / (2 * a); double t2 = -(b - sqrt(discr)) / (2 * a); if (t1 >= 0) result.push_back(t1 * dir + orig); diff --git a/content/geometry/closestPair.cpp b/content/geometry/closestPair.cpp index 9b115f3..bbefa67 100644 --- a/content/geometry/closestPair.cpp +++ b/content/geometry/closestPair.cpp @@ -4,12 +4,11 @@ ll rec(vector<pt>::iterator a, int l, int r) { ll midx = a[m].real(); ll ans = min(rec(a, l, m), rec(a, m, r)); - inplace_merge(a+l, a+m, a+r, [](const pt& x, const pt& y) { - return x.imag() < y.imag(); - }); + ranges::inplace_merge(a+l, a+m, a+r, {}, + [](pt x) { return imag(x); }); pt tmp[8]; - fill(all(tmp), a[l]); + ranges::fill(tmp, a[l]); for (int i = l + 1, next = 0; i < r; i++) { if (ll x = a[i].real() - midx; x * x < ans) { for (pt& p : tmp) ans = min(ans, norm(p - a[i])); @@ -19,9 +18,7 @@ ll rec(vector<pt>::iterator a, int l, int r) { return ans; } -ll shortestDist(vector<pt> a) { // sz(pts) > 1 - sort(all(a), [](const pt& x, const pt& y) { - return x.real() < y.real(); - }); - return rec(a.begin(), 0, sz(a)); +ll shortestDist(vector<pt> a) { // size(pts) > 1 + ranges::sort(a, {}, [](pt x) { return real(x); }); + return rec(a.begin(), 0, ssize(a)); } diff --git a/content/geometry/convexHull.cpp b/content/geometry/convexHull.cpp index 1173924..03c6343 100644 --- a/content/geometry/convexHull.cpp +++ b/content/geometry/convexHull.cpp @@ -1,18 +1,16 @@ vector<pt> convexHull(vector<pt> pts){ - sort(all(pts), [](const pt& a, const pt& b){ - return real(a) == real(b) ? imag(a) < imag(b) - : real(a) < real(b); - }); - pts.erase(unique(all(pts)), pts.end()); + ranges::sort(pts, {}, + [](pt x) { return pair{real(x), imag(x)}; }); + pts.erase(begin(ranges::unique(pts)), end(pts)); int k = 0; - vector<pt> h(2 * sz(pts)); - auto half = [&](auto begin, auto end, int t) { - for (auto it = begin; it != end; it++) { - while (k > t && cross(h[k-2], h[k-1], *it) <= 0) k--; - h[k++] = *it; + vector<pt> h(2 * ssize(pts)); + auto half = [&](auto &&v, int t) { + for (auto x: v) { + while (k > t && cross(h[k-2], h[k-1], x) <= 0) k--; + h[k++] = x; }}; - half(all(pts), 1); // Untere Hülle. - half(next(pts.rbegin()), pts.rend(), k); // Obere Hülle. + half(pts, 1); // Untere Hülle. 
+ half(pts | views::reverse | views::drop(1), k); // Obere Hülle h.resize(k); return h; } diff --git a/content/geometry/delaunay.cpp b/content/geometry/delaunay.cpp index c813892..9ae9061 100644 --- a/content/geometry/delaunay.cpp +++ b/content/geometry/delaunay.cpp @@ -3,7 +3,8 @@ using pt = complex<lll>; constexpr pt INF_PT = pt(2e18, 2e18); -bool circ(pt p, pt a, pt b, pt c) {// p in circle(A,B,C), ABC must be ccw +// p in circle(A,B,C), ABC must be ccw +bool circ(pt p, pt a, pt b, pt c) { return imag((c-b)*conj(p-c)*(a-p)*conj(b-a)) < 0; } @@ -12,10 +13,10 @@ struct QuadEdge { QuadEdge* onext = nullptr; pt orig = INF_PT; bool used = false; - QuadEdge* rev() const {return rot->rot;} - QuadEdge* lnext() const {return rot->rev()->onext->rot;} - QuadEdge* oprev() const {return rot->onext->rot;} - pt dest() const {return rev()->orig;} + QuadEdge* rev() const { return rot->rot; } + QuadEdge* lnext() const { return rot->rev()->onext->rot; } + QuadEdge* oprev() const { return rot->onext->rot; } + pt dest() const { return rev()->orig; } }; deque<QuadEdge> edgeData; @@ -98,12 +99,10 @@ pair<QuadEdge*, QuadEdge*> rec(IT l, IT r) { } vector<pt> delaunay(vector<pt> pts) { - if (sz(pts) <= 2) return {}; - sort(all(pts), [](const pt& a, const pt& b) { - if (real(a) != real(b)) return real(a) < real(b); - return imag(a) < imag(b); - }); - QuadEdge* r = rec(all(pts)).first; + if (ssize(pts) <= 2) return {}; + ranges::sort(pts, {}, + [](pt x) { return pair{real(x), imag(x)}; }); + QuadEdge* r = rec(begin(pts), end(pts)).first; vector<QuadEdge*> edges = {r}; while (cross(r->onext->dest(), r->dest(), r->orig) < 0) r = r->onext; auto add = [&](QuadEdge* e){ @@ -117,7 +116,7 @@ vector<pt> delaunay(vector<pt> pts) { }; add(r); pts.clear(); - for (int i = 0; i < sz(edges); i++) { + for (int i = 0; i < ssize(edges); i++) { if (!edges[i]->used) add(edges[i]); } return pts; diff --git a/content/geometry/formulas.cpp b/content/geometry/formulas.cpp index 5d4e10d..b339451 100644 --- a/content/geometry/formulas.cpp +++ b/content/geometry/formulas.cpp @@ -6,20 +6,17 @@ constexpr double PIU = acos(-1.0l); // PIL < PI < PIU constexpr double PIL = PIU-2e-19l; // Winkel zwischen Punkt und x-Achse in [-PI, PI]. -double angle(pt a) {return arg(a);} +double angle(pt a) { return arg(a); } // rotiert Punkt im Uhrzeigersinn um den Ursprung. -pt rotate(pt a, double theta) {return a * polar(1.0, theta);} +pt rotate(pt a, double theta) { return a * polar(1.0, theta); } // Skalarprodukt. -auto dot(pt a, pt b) {return real(conj(a) * b);} - -// abs()^2.(pre c++20) -auto norm(pt a) {return dot(a, a);} +auto dot(pt a, pt b) { return real(conj(a) * b); } // Kreuzprodukt, 0, falls kollinear. 
-auto cross(pt a, pt b) {return imag(conj(a) * b);} -auto cross(pt p, pt a, pt b) {return cross(a - p, b - p);} +auto cross(pt a, pt b) { return imag(conj(a) * b); } +auto cross(pt p, pt a, pt b) { return cross(a - p, b - p); } // 1 => c links von a->b // 0 => a, b und c kolliniear diff --git a/content/geometry/formulas3d.cpp b/content/geometry/formulas3d.cpp index 63de2ce..66a4644 100644 --- a/content/geometry/formulas3d.cpp +++ b/content/geometry/formulas3d.cpp @@ -2,20 +2,20 @@ auto operator|(pt3 a, pt3 b) { return a.x * b.x + a.y*b.y + a.z*b.z; } -auto dot(pt3 a, pt3 b) {return a|b;} +auto dot(pt3 a, pt3 b) { return a|b; } // Kreuzprodukt -pt3 operator*(pt3 a, pt3 b) {return {a.y*b.z - a.z*b.y, - a.z*b.x - a.x*b.z, - a.x*b.y - a.y*b.x};} -pt3 cross(pt3 a, pt3 b) {return a*b;} +pt3 operator*(pt3 a, pt3 b) { return {a.y*b.z - a.z*b.y, + a.z*b.x - a.x*b.z, + a.x*b.y - a.y*b.x}; } +pt3 cross(pt3 a, pt3 b) { return a*b; } // Länge von a -double abs(pt3 a) {return sqrt(dot(a, a));} -double abs(pt3 a, pt3 b) {return abs(b - a);} +double abs(pt3 a) { return sqrt(dot(a, a)); } +double abs(pt3 a, pt3 b) { return abs(b - a); } // Mixedprodukt -auto mixed(pt3 a, pt3 b, pt3 c) {return a*b|c;}; +auto mixed(pt3 a, pt3 b, pt3 c) { return a*b|c; } // orientierung von p zu der Ebene durch a, b, c // -1 => gegen den Uhrzeigersinn, diff --git a/content/geometry/geometry.tex b/content/geometry/geometry.tex index 92285c4..9290de4 100644 --- a/content/geometry/geometry.tex +++ b/content/geometry/geometry.tex @@ -7,7 +7,7 @@ \sourcecode{geometry/closestPair.cpp} \end{algorithm} -\begin{algorithm}{Konvexehülle} +\begin{algorithm}{Konvexe Hülle} \begin{methods} \method{convexHull}{berechnet konvexe Hülle}{n\*\log(n)} \end{methods} @@ -18,6 +18,7 @@ \end{itemize} \sourcecode{geometry/convexHull.cpp} \end{algorithm} +\columnbreak \begin{algorithm}{Rotating calipers} \begin{methods} @@ -29,6 +30,7 @@ \subsection{Formeln~~--~\texttt{std::complex}} \sourcecode{geometry/formulas.cpp} +\columnbreak \sourcecode{geometry/linesAndSegments.cpp} \sourcecode{geometry/sortAround.cpp} \input{geometry/triangle} @@ -40,7 +42,7 @@ \sourcecode{geometry/formulas3d.cpp} \optional{ - \subsection{3D-Kugeln} + \subsection{3D-Kugeln \opthint} \sourcecode{geometry/spheres.cpp} } @@ -48,15 +50,22 @@ \sourcecode{geometry/hpi.cpp} \end{algorithm} +\begin{algorithm}[optional]{Intersecting Segments} + \begin{methods} + \method{intersect}{finds ids of intersecting segments}{n\*\log(n)} + \end{methods} + \sourcecode{geometry/segmentIntersection.cpp} +\end{algorithm} + \begin{algorithm}[optional]{Delaunay Triangulierung} \begin{methods} \method{delaunay}{berechnet Triangulierung}{n\*\log(n)} \end{methods} - \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Traingulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig. + \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Triangulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig. 
\sourcecode{geometry/delaunay.cpp} \end{algorithm} \optional{ -\subsection{Geraden} +\subsection{Geraden \opthint} \sourcecode{geometry/lines.cpp} } diff --git a/content/geometry/hpi.cpp b/content/geometry/hpi.cpp index 02c71e3..ec27254 100644 --- a/content/geometry/hpi.cpp +++ b/content/geometry/hpi.cpp @@ -1,6 +1,6 @@ constexpr ll INF = 0x1FFF'FFFF'FFFF'FFFF; //THIS CODE IS WIP -bool left(pt p) {return real(p) < 0 || +bool left(pt p) {return real(p) < 0 || (real(p) == 0 && imag(p) < 0);} struct hp { pt from, to; @@ -11,7 +11,7 @@ struct hp { bool dummy() const {return from == to;} pt dir() const {return dummy() ? to : to - from;} bool operator<(const hp& o) const { - if (left(dir()) != left(o.dir())) + if (left(dir()) != left(o.dir())) return left(dir()) > left(o.dir()); return cross(dir(), o.dir()) > 0; } diff --git a/content/geometry/linesAndSegments.cpp b/content/geometry/linesAndSegments.cpp index ddab554..985ee24 100644 --- a/content/geometry/linesAndSegments.cpp +++ b/content/geometry/linesAndSegments.cpp @@ -28,9 +28,7 @@ pt projectToLine(pt a, pt b, pt p) { // sortiert alle Punkte pts auf einer Linie entsprechend dir void sortLine(pt dir, vector<pt>& pts) { // (2d und 3d) - sort(all(pts), [&](pt a, pt b){ - return dot(dir, a) < dot(dir, b); - }); + ranges::sort(pts, {}, [&](pt x) { return dot(dir, x); }); } // Liegt p auf der Strecke a-b? (nutze < für inberhalb) @@ -66,7 +64,7 @@ vector<pt> segmentIntersection2(pt a, pt b, pt c, pt d) { double x = cross(b - a, d - c); double y = cross(c - a, d - c); double z = cross(b - a, a - c); - if (x < 0) {x = -x; y = -y; z = -z;} + if (x < 0) { x = -x; y = -y; z = -z; } if (y < -EPS || y-x > EPS || z < -EPS || z-x > EPS) return {}; if (x > EPS) return {a + y/x*(b - a)}; vector<pt> result; diff --git a/content/geometry/polygon.cpp b/content/geometry/polygon.cpp index 1332a4a..474ce88 100644 --- a/content/geometry/polygon.cpp +++ b/content/geometry/polygon.cpp @@ -2,7 +2,7 @@ // Punkte gegen den Uhrzeigersinn: positiv, sonst negativ. double area(const vector<pt>& poly) { //poly[0] == poly.back() ll res = 0; - for (int i = 0; i + 1 < sz(poly); i++) + for (int i = 0; i + 1 < ssize(poly); i++) res += cross(poly[i], poly[i + 1]); return 0.5 * res; } @@ -13,7 +13,7 @@ double area(const vector<pt>& poly) { //poly[0] == poly.back() // selbstschneidenden Polygonen (definitions Sache) ll windingNumber(pt p, const vector<pt>& poly) { ll res = 0; - for (int i = 0; i + 1 < sz(poly); i++) { + for (int i = 0; i + 1 < ssize(poly); i++) { pt a = poly[i], b = poly[i + 1]; if (real(a) > real(b)) swap(a, b); if (real(a) <= real(p) && real(p) < real(b) && @@ -26,7 +26,7 @@ ll windingNumber(pt p, const vector<pt>& poly) { // check if point is inside polygon (any polygon) bool inside(pt p, const vector<pt>& poly) { bool in = false; - for (int i = 0; i + 1 < sz(poly); i++) { + for (int i = 0; i + 1 < ssize(poly); i++) { pt a = poly[i], b = poly[i + 1]; if (pointOnSegment(a, b, p)) return false; // border counts? 
if (real(a) > real(b)) swap(a, b); @@ -40,7 +40,7 @@ bool inside(pt p, const vector<pt>& poly) { // convex hull without duplicates, h[0] != h.back() // apply comments if border counts as inside bool insideConvex(pt p, const vector<pt>& hull) { - int l = 0, r = sz(hull) - 1; + int l = 0, r = ssize(hull) - 1; if (cross(hull[0], hull[r], p) >= 0) return false; // > 0 while (l + 1 < r) { int m = (l + r) / 2; @@ -51,11 +51,9 @@ bool insideConvex(pt p, const vector<pt>& hull) { } void rotateMin(vector<pt>& hull) { - auto mi = min_element(all(hull), [](const pt& a, const pt& b){ - return real(a) == real(b) ? imag(a) < imag(b) - : real(a) < real(b); - }); - rotate(hull.begin(), mi, hull.end()); + auto mi = ranges::min_element(hull, {}, + [](pt a) { return pair{real(a), imag(a)}; }); + ranges::rotate(hull, mi); } // convex hulls without duplicates, h[0] != h.back() @@ -67,7 +65,7 @@ vector<pt> minkowski(vector<pt> ps, vector<pt> qs) { ps.push_back(ps[1]); qs.push_back(qs[1]); vector<pt> res; - for (ll i = 0, j = 0; i + 2 < sz(ps) || j + 2 < sz(qs);) { + for (ll i = 0, j = 0; i+2 < ssize(ps) || j+2 < ssize(qs);) { res.push_back(ps[i] + qs[j]); auto c = cross(ps[i + 1] - ps[i], qs[j + 1] - qs[j]); if(c >= 0) i++; @@ -83,22 +81,22 @@ double dist(const vector<pt>& ps, vector<pt> qs) { p.push_back(p[0]); double res = INF; bool intersect = true; - for (ll i = 0; i + 1 < sz(p); i++) { + for (ll i = 0; i + 1 < ssize(p); i++) { intersect &= cross(p[i], p[i+1]) >= 0; res = min(res, distToSegment(p[i], p[i+1], 0)); } return intersect ? 0 : res; } -bool left(pt of, pt p) {return cross(p, of) < 0 || - (cross(p, of) == 0 && dot(p, of) > 0);} +bool left(pt of, pt p) { return cross(p, of) < 0 || + (cross(p, of) == 0 && dot(p, of) > 0); } // convex hulls without duplicates, hull[0] == hull.back() and // hull[0] must be a convex point (with angle < pi) // returns index of corner where dot(dir, corner) is maximized int extremal(const vector<pt>& hull, pt dir) { dir *= pt(0, 1); - int l = 0, r = sz(hull) - 1; + int l = 0, r = ssize(hull) - 1; while (l + 1 < r) { int m = (l + r) / 2; pt dm = hull[m+1]-hull[m]; @@ -110,7 +108,7 @@ int extremal(const vector<pt>& hull, pt dir) { if (cross(dir, dm) < 0) l = m; else r = m; }} - return r % (sz(hull) - 1); + return r % (ssize(hull) - 1); } // convex hulls without duplicates, hull[0] == hull.back() and @@ -126,7 +124,7 @@ vector<int> intersectLine(const vector<pt>& hull, pt a, pt b) { if (cross(hull[endA], a, b) > 0 || cross(hull[endB], a, b) < 0) return {}; - int n = sz(hull) - 1; + int n = ssize(hull) - 1; vector<int> res; for (auto _ : {0, 1}) { int l = endA, r = endB; diff --git a/content/geometry/segmentIntersection.cpp b/content/geometry/segmentIntersection.cpp index afc01b2..9fdbdb8 100644 --- a/content/geometry/segmentIntersection.cpp +++ b/content/geometry/segmentIntersection.cpp @@ -39,10 +39,10 @@ pair<int, int> intersect(vector<seg>& segs) { events.push_back({s.a, s.id, 1}); events.push_back({s.b, s.id, -1}); } - sort(all(events)); + ranges::sort(events, less{}); set<seg> q; - vector<set<seg>::iterator> where(sz(segs)); + vector<set<seg>::iterator> where(ssize(segs)); for (auto e : events) { int id = e.id; if (e.type > 0) { diff --git a/content/geometry/sortAround.cpp b/content/geometry/sortAround.cpp index 98d17a8..7e9d1de 100644 --- a/content/geometry/sortAround.cpp +++ b/content/geometry/sortAround.cpp @@ -1,11 +1,11 @@ -bool left(pt p) {return real(p) < 0 ||
- (real(p) == 0 && imag(p) < 0);}
-
-// counter clockwise, starting with "11:59"
-void sortAround(pt p, vector<pt>& ps) {
- sort(all(ps), [&](const pt& a, const pt& b){
- if (left(a - p) != left(b - p))
- return left(a - p) > left(b - p);
- return cross(p, a, b) > 0;
- });
-}
+bool left(pt p) { return real(p) < 0 || + (real(p) == 0 && imag(p) < 0); } + +// counter clockwise, starting with "11:59" +void sortAround(pt p, vector<pt>& ps) { + ranges::sort(ps, [&](const pt& a, const pt& b){ + if (left(a - p) != left(b - p)) + return left(a - p) > left(b - p); + return cross(p, a, b) > 0; + }); +} diff --git a/content/geometry/triangle.cpp b/content/geometry/triangle.cpp index 534bb10..eab17f4 100644 --- a/content/geometry/triangle.cpp +++ b/content/geometry/triangle.cpp @@ -1,5 +1,5 @@ // Mittelpunkt des Dreiecks abc. -pt centroid(pt a, pt b, pt c) {return (a + b + c) / 3.0;} +pt centroid(pt a, pt b, pt c) { return (a + b + c) / 3.0; } // Flächeninhalt eines Dreicks bei bekannten Eckpunkten. double area(pt a, pt b, pt c) { @@ -30,7 +30,7 @@ pt circumCenter(pt a, pt b, pt c) { // -1 => p außerhalb Kreis durch a,b,c // 0 => p auf Kreis durch a,b,c // 1 => p im Kreis durch a,b,c -int insideOutCenter(pt a, pt b, pt c, pt p) {// braucht lll +int insideOutCenter(pt a, pt b, pt c, pt p) { // braucht lll return ccw(a,b,c) * sgn(imag((c-b)*conj(p-c)*(a-p)*conj(b-a))); } diff --git a/content/graph/2sat.cpp b/content/graph/2sat.cpp index 75e54e6..2b49fc6 100644 --- a/content/graph/2sat.cpp +++ b/content/graph/2sat.cpp @@ -4,19 +4,19 @@ struct sat2 { sat2(int vars) : n(vars*2), adj(n) {} - static int var(int i) {return i << 1;} // use this! + static int var(int i) { return i << 1; } // use this! void addImpl(int a, int b) { adj[a].push_back(b); adj[1^b].push_back(1^a); } - void addEquiv(int a, int b) {addImpl(a, b); addImpl(b, a);} - void addOr(int a, int b) {addImpl(1^a, b);} - void addXor(int a, int b) {addOr(a, b); addOr(1^a, 1^b);} - void addTrue(int a) {addImpl(1^a, a);} - void addFalse(int a) {addTrue(1^a);} - void addAnd(int a, int b) {addTrue(a); addTrue(b);} - void addNand(int a, int b) {addOr(1^a, 1^b);} + void addEquiv(int a, int b) { addImpl(a, b); addImpl(b, a); } + void addOr(int a, int b) { addImpl(1^a, b);} + void addXor(int a, int b) { addOr(a, b); addOr(1^a, 1^b); } + void addTrue(int a) { addImpl(1^a, a);} + void addFalse(int a) { addTrue(1^a);} + void addAnd(int a, int b) { addTrue(a); addTrue(b); } + void addNand(int a, int b) { addOr(1^a, 1^b); } bool solve() { scc(); //scc code von oben diff --git a/content/graph/LCA_sparse.cpp b/content/graph/LCA_sparse.cpp index 221b5ed..1da8876 100644 --- a/content/graph/LCA_sparse.cpp +++ b/content/graph/LCA_sparse.cpp @@ -5,12 +5,12 @@ struct LCA { SparseTable st; //sparse table @\sourceref{datastructures/sparseTable.cpp}@ void init(vector<vector<int>>& adj, int root) { - depth.assign(2 * sz(adj), 0); - visited.assign(2 * sz(adj), -1); - first.assign(sz(adj), 2 * sz(adj)); + depth.assign(2 * ssize(adj), 0); + visited.assign(2 * ssize(adj), -1); + first.assign(ssize(adj), 2 * ssize(adj)); idx = 0; dfs(adj, root); - st.init(&depth); + st.init(depth); } void dfs(vector<vector<int>>& adj, int v, ll d=0) { @@ -18,15 +18,15 @@ struct LCA { first[v] = min(idx, first[v]), idx++; for (int u : adj[v]) { - if (first[u] == 2 * sz(adj)) { + if (first[u] == 2 * ssize(adj)) { dfs(adj, u, d + 1); visited[idx] = v, depth[idx] = d, idx++; }}} int getLCA(int u, int v) { if (first[u] > first[v]) swap(u, v); - return visited[st.queryIdempotent(first[u], first[v] + 1)]; + return visited[st.query(first[u], first[v] + 1)]; } - ll getDepth(int v) {return depth[first[v]];} + ll getDepth(int v) { return depth[first[v]]; } }; diff --git a/content/graph/TSP.cpp b/content/graph/TSP.cpp index 6223858..4d2479c 100644 --- a/content/graph/TSP.cpp +++ 
b/content/graph/TSP.cpp @@ -1,7 +1,7 @@ vector<vector<ll>> dist; // Entfernung zwischen je zwei Punkten. auto TSP() { - int n = sz(dist), m = 1 << n; + int n = ssize(dist), m = 1 << n; vector<vector<edge>> dp(n, vector<edge>(m, edge{INF, -1})); for (int c = 0; c < n; c++) @@ -21,7 +21,7 @@ auto TSP() { vector<int> tour = {0}; int v = 0; - while (tour.back() != 0 || sz(tour) == 1) + while (tour.back() != 0 || ssize(tour) == 1) tour.push_back(dp[tour.back()] [(v |= (1 << tour.back()))].to); // Enthält Knoten 0 zweimal. An erster und letzter Position. diff --git a/content/graph/articulationPoints.cpp b/content/graph/articulationPoints.cpp index 25ff67e..60970e6 100644 --- a/content/graph/articulationPoints.cpp +++ b/content/graph/articulationPoints.cpp @@ -14,14 +14,14 @@ int dfs(int v, int from = -1) { if (num[e.to] < me) st.push_back(e); } else { if (v == root) rootCount++; - int si = sz(st); + int si = ssize(st); int up = dfs(e.to, e.id); top = min(top, up); if (up >= me) isArt[v] = true; if (up > me) bridges.push_back(e); if (up <= me) st.push_back(e); if (up == me) { - bcc.emplace_back(si + all(st)); + bcc.emplace_back(begin(st) + si, end(st)); st.resize(si); }}} return top; @@ -29,12 +29,12 @@ int dfs(int v, int from = -1) { void find() { counter = 0; - num.assign(sz(adj), 0); - isArt.assign(sz(adj), false); + num.assign(ssize(adj), 0); + isArt.assign(ssize(adj), false); bridges.clear(); st.clear(); bcc.clear(); - for (int v = 0; v < sz(adj); v++) { + for (int v = 0; v < ssize(adj); v++) { if (!num[v]) { root = v; rootCount = 0; diff --git a/content/graph/binary_lifting.cpp b/content/graph/binary_lifting.cpp new file mode 100644 index 0000000..f88b1a9 --- /dev/null +++ b/content/graph/binary_lifting.cpp @@ -0,0 +1,28 @@ +struct Lift { + vector<int> dep, par, jmp; + + Lift(vector<vector<int>> &adj, int root): + dep(adj.size()), par(adj.size()), jmp(adj.size(), root) { + auto dfs = [&](auto &self, int u, int p, int d) -> void { + dep[u] = d, par[u] = p; + jmp[u] = dep[p] + dep[jmp[jmp[p]]] == 2*dep[jmp[p]] + ? jmp[jmp[p]] : p; + for (int v: adj[u]) if (v != p) self(self, v, u, d+1); + }; + dfs(dfs, root, root, 0); + } + + int depth(int v) { return dep[v]; } + int lift(int v, int d) { + while (dep[v] > d) v = dep[jmp[v]] < d ? par[v] : jmp[v]; + return v; + } + int lca(int u, int v) { + v = lift(v, dep[u]), u = lift(u, dep[v]); + while (u != v) { + auto &a = jmp[u] == jmp[v] ? par : jmp; + u = a[u], v = a[v]; + } + return u; + } +}; diff --git a/content/graph/bitonicTSP.cpp b/content/graph/bitonicTSP.cpp index eee5082..eeff156 100644 --- a/content/graph/bitonicTSP.cpp +++ b/content/graph/bitonicTSP.cpp @@ -1,10 +1,10 @@ vector<vector<double>> dist; // Initialisiere mit Entfernungen zwischen Punkten. auto bitonicTSP() { - vector<double> dp(sz(dist), HUGE_VAL); - vector<int> pre(sz(dist)); // nur für Tour + vector<double> dp(ssize(dist), HUGE_VAL); + vector<int> pre(ssize(dist)); // nur für Tour dp[0] = 0; dp[1] = 2 * dist[0][1]; pre[1] = 0; - for (unsigned int i = 2; i < sz(dist); i++) { + for (unsigned int i = 2; i < ssize(dist); i++) { double link = 0; for (int j = i - 2; j >= 0; j--) { link += dist[j + 1][j + 2]; @@ -13,9 +13,9 @@ auto bitonicTSP() { dp[i] = opt; pre[i] = j; }}} - // return dp.back(); // Länger der Tour + // return dp.back(); // Länge der Tour - int j, n = sz(dist) - 1; + int j, n = ssize(dist) - 1; vector<int> ut, lt = {n, n - 1}; do { j = pre[n]; @@ -25,7 +25,7 @@ auto bitonicTSP() { } } while(n = j + 1, j > 0); (lt.back() == 1 ? 
lt : ut).push_back(0); - reverse(all(lt)); - lt.insert(lt.end(), all(ut)); + ranges::reverse(lt); + lt.insert(end(lt), begin(ut), end(ut)); return lt; // Enthält Knoten 0 zweimal. An erster und letzter Position. } diff --git a/content/graph/bitonicTSPsimple.cpp b/content/graph/bitonicTSPsimple.cpp index cacfb9c..b6d72d8 100644 --- a/content/graph/bitonicTSPsimple.cpp +++ b/content/graph/bitonicTSPsimple.cpp @@ -3,7 +3,7 @@ vector<vector<double>> dp; double get(int p1, int p2) { int v = max(p1, p2) + 1; - if (v == sz(dist)) return dist[p1][v - 1] + dist[p2][v - 1]; + if (v == ssize(dist)) return dist[p1][v - 1] + dist[p2][v - 1]; if (dp[p1][p2] >= 0.0) return dp[p1][p2]; double tryLR = dist[p1][v] + get(v, p2); double tryRL = dist[p2][v] + get(p1, v); @@ -11,17 +11,19 @@ double get(int p1, int p2) { } auto bitonicTSP() { - dp = vector<vector<double>>(sz(dist), - vector<double>(sz(dist), -1)); + dp = vector<vector<double>>(ssize(dist), + vector<double>(ssize(dist), -1)); get(0, 0); - // return dp[0][0]; // Länger der Tour + // return dp[0][0]; // Länge der Tour vector<int> lr = {0}, rl = {0}; - for (int p1 = 0, p2 = 0, v; (v = max(p1, p2)+1) < sz(dist);) { + for (int p1 = 0, p2 = 0, v; + (v = max(p1, p2)+1) < ssize(dist);) { if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) { lr.push_back(v); p1 = v; } else { rl.push_back(v); p2 = v; }} lr.insert(lr.end(), rl.rbegin(), rl.rend()); - return lr; // Enthält Knoten 0 zweimal. An erster und letzter Position. + // Enthält Knoten 0 zweimal. An erster und letzter Position. + return lr; } diff --git a/content/graph/blossom.cpp b/content/graph/blossom.cpp index 7bd494a..3c9bd31 100644 --- a/content/graph/blossom.cpp +++ b/content/graph/blossom.cpp @@ -32,7 +32,7 @@ struct GM { auto h = label[r] = label[s] = {~x, y}; int join; while (true) { - if (s != sz(adj)) swap(r, s); + if (s != ssize(adj)) swap(r, s); r = findFirst(label[pairs[r]].first); if (label[r] == h) { join = r; @@ -48,13 +48,13 @@ struct GM { }}} bool augment(int v) { - label[v] = {sz(adj), -1}; - first[v] = sz(adj); + label[v] = {ssize(adj), -1}; + first[v] = ssize(adj); head = tail = 0; for (que[tail++] = v; head < tail;) { int x = que[head++]; for (int y : adj[x]) { - if (pairs[y] == sz(adj) && y != v) { + if (pairs[y] == ssize(adj) && y != v) { pairs[y] = x; rematch(x, y); return true; @@ -70,12 +70,12 @@ struct GM { int match() { int matching = head = tail = 0; - for (int v = 0; v < sz(adj); v++) { - if (pairs[v] < sz(adj) || !augment(v)) continue; + for (int v = 0; v < ssize(adj); v++) { + if (pairs[v] < ssize(adj) || !augment(v)) continue; matching++; for (int i = 0; i < tail; i++) label[que[i]] = label[pairs[que[i]]] = {-1, -1}; - label[sz(adj)] = {-1, -1}; + label[ssize(adj)] = {-1, -1}; } return matching; } diff --git a/content/graph/bronKerbosch.cpp b/content/graph/bronKerbosch.cpp index 0cfcc5f..cf07c88 100644 --- a/content/graph/bronKerbosch.cpp +++ b/content/graph/bronKerbosch.cpp @@ -11,7 +11,7 @@ void bronKerboschRec(bits R, bits P, bits X) { } else { int q = min(P._Find_first(), X._Find_first()); bits cands = P & ~adj[q]; - for (int i = 0; i < sz(adj); i++) if (cands[i]) { + for (int i = 0; i < ssize(adj); i++) if (cands[i]) { R[i] = 1; bronKerboschRec(R, P & adj[i], X & adj[i]); R[i] = P[i] = 0; @@ -20,5 +20,5 @@ void bronKerboschRec(bits R, bits P, bits X) { void bronKerbosch() { cliques.clear(); - bronKerboschRec({}, {(1ull << sz(adj)) - 1}, {}); + bronKerboschRec({}, {(1ull << ssize(adj)) - 1}, {}); } diff --git a/content/graph/centroid.cpp b/content/graph/centroid.cpp index 
820945b..3cd5519 100644 --- a/content/graph/centroid.cpp +++ b/content/graph/centroid.cpp @@ -15,7 +15,7 @@ pair<int, int> dfs_cent(int v, int from, int n) { } pair<int, int> find_centroid(int root = 0) { - s.resize(sz(adj)); + s.resize(ssize(adj)); dfs_sz(root); return dfs_cent(root, -1, s[root]); } diff --git a/content/graph/cycleCounting.cpp b/content/graph/cycleCounting.cpp index 6a299ee..deac71e 100644 --- a/content/graph/cycleCounting.cpp +++ b/content/graph/cycleCounting.cpp @@ -9,8 +9,8 @@ struct cycles { cycles(int n) : adj(n), seen(n), paths(n) {} void addEdge(int u, int v) { - adj[u].push_back({v, sz(edges)}); - adj[v].push_back({u, sz(edges)}); + adj[u].push_back({v, ssize(edges)}); + adj[v].push_back({u, ssize(edges)}); edges.push_back({u, v}); } @@ -36,10 +36,10 @@ struct cycles { cur[id].flip(); }}} - bool isCycle(cycle cur) {//cycle must be constrcuted from base + bool isCycle(cycle cur) {// cycle must be constructed from base if (cur.none()) return false; - init(sz(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@ - for (int i = 0; i < sz(edges); i++) { + init(ssize(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@ + for (int i = 0; i < ssize(edges); i++) { if (cur[i]) { cur[i] = false; if (findSet(edges[i].first) == @@ -50,12 +50,12 @@ struct cycles { } int count() { - for (int i = 0; i < sz(adj); i++) findBase(i); - assert(sz(base) < 30); + for (int i = 0; i < ssize(adj); i++) findBase(i); + assert(ssize(base) < 30); int res = 0; - for (int i = 1; i < (1 << sz(base)); i++) { + for (int i = 1; i < (1 << ssize(base)); i++) { cycle cur; - for (int j = 0; j < sz(base); j++) + for (int j = 0; j < ssize(base); j++) if (((i >> j) & 1) != 0) cur ^= base[j]; if (isCycle(cur)) res++; } diff --git a/content/graph/dijkstra.cpp b/content/graph/dijkstra.cpp index 61c636d..ab4bef9 100644 --- a/content/graph/dijkstra.cpp +++ b/content/graph/dijkstra.cpp @@ -1,21 +1,18 @@ -using path = pair<ll, int>; //dist, destination +using Dist = ll; -auto dijkstra(const vector<vector<path>>& adj, int start) { - priority_queue<path, vector<path>, greater<path>> pq; - vector<ll> dist(sz(adj), INF); - vector<int> prev(sz(adj), -1); - dist[start] = 0; pq.emplace(0, start); +auto dijkstra(vector<vector<pair<int, Dist>>> &adj, int start) { + priority_queue<pair<Dist, int>> pq; + vector<Dist> dist(ssize(adj), INF); + dist[start] = 0, pq.emplace(0, start); - while (!pq.empty()) { - auto [dv, v] = pq.top(); pq.pop(); - if (dv > dist[v]) continue; // WICHTIG! + while (!empty(pq)) { + auto [du, u] = pq.top(); + du = -du, pq.pop(); + if (du > dist[u]) continue; // WICHTIG! 
- for (auto [du, u] : adj[v]) { - ll newDist = dv + du; - if (newDist < dist[u]) { - dist[u] = newDist; - prev[u] = v; - pq.emplace(dist[u], u); - }}} - return dist; //return prev; + for (auto [v, d]: adj[u]) { + Dist dv = du + d; + if (dv < dist[v]) dist[v] = dv, pq.emplace(-dv, v); + }} + return dist; } diff --git a/content/graph/dinic.cpp b/content/graph/dinic.cpp index 2e58a2d..c8c34a8 100644 --- a/content/graph/dinic.cpp +++ b/content/graph/dinic.cpp @@ -8,12 +8,12 @@ int s, t; vector<int> pt, dist; void addEdge(int u, int v, ll c) { - adj[u].push_back({v, (int)sz(adj[v]), 0, c}); - adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0}); + adj[u].push_back({v, (int)ssize(adj[v]), 0, c}); + adj[v].push_back({u, (int)ssize(adj[u]) - 1, 0, 0}); } bool bfs() { - dist.assign(sz(adj), -1); + dist.assign(ssize(adj), -1); dist[s] = 0; queue<int> q({s}); while (!q.empty() && dist[t] < 0) { @@ -28,7 +28,7 @@ bool bfs() { ll dfs(int v, ll flow = INF) { if (v == t || flow == 0) return flow; - for (; pt[v] < sz(adj[v]); pt[v]++) { + for (; pt[v] < ssize(adj[v]); pt[v]++) { Edge& e = adj[v][pt[v]]; if (dist[e.to] != dist[v] + 1) continue; ll cur = dfs(e.to, min(e.c - e.f, flow)); @@ -44,7 +44,7 @@ ll maxFlow(int source, int target) { s = source, t = target; ll flow = 0; while (bfs()) { - pt.assign(sz(adj), 0); + pt.assign(ssize(adj), 0); ll cur; do { cur = dfs(s); diff --git a/content/graph/dinicScaling.cpp b/content/graph/dinicScaling.cpp index 0974b78..0082c05 100644 --- a/content/graph/dinicScaling.cpp +++ b/content/graph/dinicScaling.cpp @@ -8,12 +8,12 @@ int s, t; vector<int> pt, dist; void addEdge(int u, int v, ll c) { - adj[u].push_back({v, (int)sz(adj[v]), 0, c}); - adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0}); + adj[u].push_back({v, (int)ssize(adj[v]), 0, c}); + adj[v].push_back({u, (int)ssize(adj[u]) - 1, 0, 0}); } bool bfs(ll lim) { - dist.assign(sz(adj), -1); + dist.assign(ssize(adj), -1); dist[s] = 0; queue<int> q({s}); while (!q.empty() && dist[t] < 0) { @@ -28,7 +28,7 @@ bool bfs(ll lim) { ll dfs(int v, ll flow) { if (v == t || flow == 0) return flow; - for (; pt[v] < sz(adj[v]); pt[v]++) { + for (; pt[v] < ssize(adj[v]); pt[v]++) { Edge& e = adj[v][pt[v]]; if (dist[e.to] != dist[v] + 1) continue; ll cur = dfs(e.to, min(e.c - e.f, flow)); @@ -45,7 +45,7 @@ ll maxFlow(int source, int target) { ll flow = 0; for (ll lim = (1LL << 62); lim >= 1; lim /= 2) { while (bfs(lim)) { - pt.assign(sz(adj), 0); + pt.assign(ssize(adj), 0); ll cur; do { cur = dfs(s, lim); diff --git a/content/graph/euler.cpp b/content/graph/euler.cpp index e81cebe..d45dac0 100644 --- a/content/graph/euler.cpp +++ b/content/graph/euler.cpp @@ -2,8 +2,8 @@ vector<vector<pair<int, int>>> adj; // gets destroyed! vector<int> cycle; void addEdge(int u, int v) { - adj[u].emplace_back(v, sz(adj[v])); - adj[v].emplace_back(u, sz(adj[u]) - 1); // remove for directed + adj[u].emplace_back(v, ssize(adj[v])); + adj[v].emplace_back(u, ssize(adj[u]) - 1); // remove for directed } void euler(int v) { diff --git a/content/graph/floydWarshall.cpp b/content/graph/floydWarshall.cpp index df096c2..1a1138d 100644 --- a/content/graph/floydWarshall.cpp +++ b/content/graph/floydWarshall.cpp @@ -2,16 +2,16 @@ vector<vector<ll>> dist; // Entfernung zwischen je zwei Punkten. 
vector<vector<int>> next; void floydWarshall() { - next.assign(sz(dist), vector<int>(sz(dist), -1)); - for (int i = 0; i < sz(dist); i++) { - for (int j = 0; j < sz(dist); j++) { + next.assign(ssize(dist), vector<int>(ssize(dist), -1)); + for (int i = 0; i < ssize(dist); i++) { + for (int j = 0; j < ssize(dist); j++) { if (dist[i][j] < INF) { next[i][j] = j; }}} - for (int k = 0; k < sz(dist); k++) { - for (int i = 0; i < sz(dist); i++) { - for (int j = 0; j < sz(dist); j++) { + for (int k = 0; k < ssize(dist); k++) { + for (int i = 0; i < ssize(dist); i++) { + for (int j = 0; j < ssize(dist); j++) { // only needed if dist can be negative if (dist[i][k] == INF || dist[k][j] == INF) continue; if (dist[i][j] > dist[i][k] + dist[k][j]) { diff --git a/content/graph/graph.tex b/content/graph/graph.tex index 213c597..6e8e20b 100644 --- a/content/graph/graph.tex +++ b/content/graph/graph.tex @@ -1,12 +1,5 @@ \section{Graphen} -\begin{algorithm}{Kruskal} - \begin{methods}[ll] - berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ - \end{methods} - \sourcecode{graph/kruskal.cpp} -\end{algorithm} - \begin{algorithm}{Minimale Spannbäume} \paragraph{Schnitteigenschaft} Für jeden Schnitt $C$ im Graphen gilt: @@ -16,6 +9,12 @@ \paragraph{Kreiseigenschaft} Für jeden Kreis $K$ im Graphen gilt: Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums. + + \subsection{\textsc{Kruskal}} + \begin{methods}[ll] + berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ + \end{methods} + \sourcecode{graph/kruskal.cpp} \end{algorithm} \begin{algorithm}{Heavy-Light Decomposition} @@ -28,7 +27,7 @@ \sourcecode{graph/hld.cpp} \end{algorithm} -\begin{algorithm}{Lowest Common Ancestor} +\begin{algorithm}[optional]{Lowest Common Ancestor} \begin{methods} \method{init}{baut DFS-Baum über $g$ auf}{\abs{V}\*\log(\abs{V})} \method{getLCA}{findet LCA}{1} @@ -37,6 +36,17 @@ \sourcecode{graph/LCA_sparse.cpp} \end{algorithm} +\begin{algorithm}{Binary Lifting} + % https://codeforces.com/blog/entry/74847 + \begin{methods} + \method{Lift}{constructor}{\abs{V}} + \method{depth}{distance to root of vertex $v$}{1} + \method{lift}{vertex above $v$ at depth $d$}{\log(\abs{V})} + \method{lca}{lowest common ancestor of $u$ and $v$}{\log(\abs{V})} + \end{methods} + \sourcecode{graph/binary_lifting.cpp} +\end{algorithm} + \begin{algorithm}{Centroids} \begin{methods} \method{find\_centroid}{findet alle Centroids des Baums (maximal 2)}{\abs{V}} @@ -99,7 +109,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/connect.cpp} \end{algorithm} -\begin{algorithm}{Erd\H{o}s-Gallai} +\begin{algorithm}{\textsc{Erd\H{o}s-Gallai}} Sei $d_1 \geq \cdots \geq d_{n}$. 
Es existiert genau dann ein Graph $G$ mit Degreesequence $d$ falls $\sum\limits_{i=1}^{n} d_i$ gerade ist und für $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$ \begin{methods} \method{havelHakimi}{findet Graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})} @@ -170,7 +180,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/virtualTree.cpp} \end{algorithm} -\begin{algorithm}{Maximum Cardinatlity Bipartite Matching} +\begin{algorithm}{Maximum Cardinality Bipartite Matching} \label{kuhn} \begin{methods} \method{kuhn}{berechnet Matching}{\abs{V}\*\min(ans^2, \abs{E})} @@ -178,7 +188,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \begin{itemize} \item die ersten [0..l) Knoten in \code{adj} sind die linke Seite des Graphen \end{itemize} - \sourcecode{graph/maxCarBiMatch.cpp} + \sourcecode{graph/kuhn.cpp} \begin{methods} \method{hopcroft\_karp}{berechnet Matching}{\sqrt{\abs{V}}\*\abs{E}} \end{methods} @@ -197,7 +207,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \subsection{Max-Flow} \optional{ -\subsubsection{Push Relabel} +\subsubsection{Push Relabel \opthint} \begin{methods} \method{maxFlow}{gut bei sehr dicht besetzten Graphen.}{\abs{V}^2\*\sqrt{\abs{E}}} \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} @@ -205,24 +215,23 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/pushRelabel.cpp} } +\subsubsection{\textsc{Dinic}'s Algorithm mit Capacity Scaling} +\begin{methods} + \method{maxFlow}{doppelt so schnell wie \textsc{Ford-Fulkerson}}{\abs{V}^2\cdot\abs{E}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/dinicScaling.cpp} + \begin{algorithm}{Min-Cost-Max-Flow} \begin{methods} \method{mincostflow}{berechnet Fluss}{\abs{V}^2\cdot\abs{E}^2} \end{methods} \sourcecode{graph/minCostMaxFlow.cpp} \end{algorithm} -\vfill\null \columnbreak -\subsubsection{Dinic's Algorithm mit Capacity Scaling} -\begin{methods} - \method{maxFlow}{doppelt so schnell wie Ford Fulkerson}{\abs{V}^2\cdot\abs{E}} - \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} -\end{methods} -\sourcecode{graph/dinicScaling.cpp} - \optional{ -\subsubsection{Anwendungen} +\subsubsection{Anwendungen \opthint} \begin{itemize} \item \textbf{Maximum Edge Disjoint Paths}\newline Finde die maximale Anzahl Pfade von $s$ nach $t$, die keine Kante teilen. 
diff --git a/content/graph/havelHakimi.cpp b/content/graph/havelHakimi.cpp index ac4d67d..9f4c081 100644 --- a/content/graph/havelHakimi.cpp +++ b/content/graph/havelHakimi.cpp @@ -1,12 +1,12 @@ vector<vector<int>> havelHakimi(const vector<int>& deg) { priority_queue<pair<int, int>> pq; - for (int i = 0; i < sz(deg); i++) { + for (int i = 0; i < ssize(deg); i++) { if (deg[i] > 0) pq.push({deg[i], i}); } - vector<vector<int>> adj(sz(deg)); + vector<vector<int>> adj(ssize(deg)); while (!pq.empty()) { auto [degV, v] = pq.top(); pq.pop(); - if (sz(pq) < degV) return {}; //impossible + if (ssize(pq) < degV) return {}; //impossible vector<pair<int, int>> todo(degV); for (auto& e : todo) e = pq.top(), pq.pop(); for (auto [degU, u] : todo) { diff --git a/content/graph/hld.cpp b/content/graph/hld.cpp index 65d3f5c..e365b13 100644 --- a/content/graph/hld.cpp +++ b/content/graph/hld.cpp @@ -21,7 +21,7 @@ void dfs_hld(int v = 0, int from = -1) { } void init(int root = 0) { - int n = sz(adj); + int n = ssize(adj); sz.assign(n, 1), nxt.assign(n, root), par.assign(n, -1); in.resize(n), out.resize(n); counter = 0; diff --git a/content/graph/hopcroftKarp.cpp b/content/graph/hopcroftKarp.cpp index c1f5d1c..d07bd3a 100644 --- a/content/graph/hopcroftKarp.cpp +++ b/content/graph/hopcroftKarp.cpp @@ -5,14 +5,14 @@ vector<int> pairs, dist, ptr; bool bfs(int l) { queue<int> q; for(int v = 0; v < l; v++) { - if (pairs[v] < 0) {dist[v] = 0; q.push(v);} + if (pairs[v] < 0) { dist[v] = 0; q.push(v); } else dist[v] = -1; } bool exist = false; while(!q.empty()) { int v = q.front(); q.pop(); for (int u : adj[v]) { - if (pairs[u] < 0) {exist = true; continue;} + if (pairs[u] < 0) { exist = true; continue; } if (dist[pairs[u]] < 0) { dist[pairs[u]] = dist[v] + 1; q.push(pairs[u]); @@ -21,7 +21,7 @@ bool bfs(int l) { } bool dfs(int v) { - for (; ptr[v] < sz(adj[v]); ptr[v]++) { + for (; ptr[v] < ssize(adj[v]); ptr[v]++) { int u = adj[v][ptr[v]]; if (pairs[u] < 0 || (dist[pairs[u]] > dist[v] && dfs(pairs[u]))) { @@ -33,7 +33,7 @@ bool dfs(int v) { int hopcroft_karp(int l) { // l = #Knoten links int ans = 0; - pairs.assign(sz(adj), -1); + pairs.assign(ssize(adj), -1); dist.resize(l); // Greedy Matching, optionale Beschleunigung. for (int v = 0; v < l; v++) for (int u : adj[v]) diff --git a/content/graph/kruskal.cpp b/content/graph/kruskal.cpp index 987d30b..d42800d 100644 --- a/content/graph/kruskal.cpp +++ b/content/graph/kruskal.cpp @@ -1,4 +1,4 @@ -sort(all(edges)); +ranges::sort(edges, less{}); vector<Edge> mst; ll cost = 0; for (Edge& e : edges) { diff --git a/content/graph/maxCarBiMatch.cpp b/content/graph/kuhn.cpp index e928387..688c846 100644 --- a/content/graph/maxCarBiMatch.cpp +++ b/content/graph/kuhn.cpp @@ -12,7 +12,7 @@ bool dfs(int v) { } int kuhn(int l) { // l = #Knoten links. - pairs.assign(sz(adj), -1); + pairs.assign(ssize(adj), -1); int ans = 0; // Greedy Matching. Optionale Beschleunigung. 
for (int v = 0; v < l; v++) for (int u : adj[v]) diff --git a/content/graph/matching.cpp b/content/graph/matching.cpp index dcaea8c..3619d7c 100644 --- a/content/graph/matching.cpp +++ b/content/graph/matching.cpp @@ -3,19 +3,19 @@ vector<vector<ll>> adj, mat; int max_matching() { int ans = 0; - mat.assign(sz(adj), {}); + mat.assign(ssize(adj), {}); for (int _ = 0; _ < I; _++) { - for (int v = 0; v < sz(adj); v++) { - mat[v].assign(sz(adj), 0); + for (int v = 0; v < ssize(adj); v++) { + mat[v].assign(ssize(adj), 0); for (int u : adj[v]) { if (u < v) { mat[v][u] = rand() % (MOD - 1) + 1; mat[u][v] = MOD - mat[v][u]; }}} - gauss(sz(adj), MOD); //LGS @\sourceref{math/lgsFp.cpp}@ + gauss(ssize(adj), MOD); //LGS @\sourceref{math/lgsFp.cpp}@ int rank = 0; for (auto& row : mat) { - if (*max_element(all(row)) != 0) rank++; + if (*ranges::max_element(row) != 0) rank++; } ans = max(ans, rank / 2); } diff --git a/content/graph/maxWeightBipartiteMatching.cpp b/content/graph/maxWeightBipartiteMatching.cpp index a2b0a80..b6f6ddf 100644 --- a/content/graph/maxWeightBipartiteMatching.cpp +++ b/content/graph/maxWeightBipartiteMatching.cpp @@ -45,6 +45,6 @@ double match(int l, int r) { yx[y] = aug[y]; swap(y, xy[aug[y]]); }} - return accumulate(all(lx), 0.0) + - accumulate(all(ly), 0.0); // Wert des Matchings + return accumulate(begin(lx), end(lx), 0.0) + + accumulate(begin(ly), end(ly), 0.0); // Wert des Matchings } diff --git a/content/graph/minCostMaxFlow.cpp b/content/graph/minCostMaxFlow.cpp index 14a222c..fde95f3 100644 --- a/content/graph/minCostMaxFlow.cpp +++ b/content/graph/minCostMaxFlow.cpp @@ -15,16 +15,16 @@ struct MinCostFlow { adj(n), s(source), t(target) {}; void addEdge(int u, int v, ll c, ll cost) { - adj[u].push_back(sz(edges)); + adj[u].push_back(ssize(edges)); edges.push_back({v, c, cost}); - adj[v].push_back(sz(edges)); + adj[v].push_back(ssize(edges)); edges.push_back({u, 0, -cost}); } bool SPFA() { - pref.assign(sz(adj), -1); - dist.assign(sz(adj), INF); - vector<bool> inqueue(sz(adj)); + pref.assign(ssize(adj), -1); + dist.assign(ssize(adj), INF); + vector<bool> inqueue(ssize(adj)); queue<int> queue; dist[s] = 0; queue.push(s); @@ -59,7 +59,7 @@ struct MinCostFlow { }} void mincostflow() { - con.assign(sz(adj), 0); + con.assign(ssize(adj), 0); maxflow = mincost = 0; while (SPFA()) extend(); } diff --git a/content/graph/pushRelabel.cpp b/content/graph/pushRelabel.cpp index ec36026..c569df2 100644 --- a/content/graph/pushRelabel.cpp +++ b/content/graph/pushRelabel.cpp @@ -9,8 +9,8 @@ vector<ll> ec; vector<int> cur, H; void addEdge(int u, int v, ll c) { - adj[u].push_back({v, (int)sz(adj[v]), 0, c}); - adj[v].push_back({u, (int)sz(adj[u])-1, 0, 0}); + adj[u].push_back({v, (int)ssize(adj[v]), 0, c}); + adj[v].push_back({u, (int)ssize(adj[u])-1, 0, 0}); } void addFlow(Edge& e, ll f) { @@ -23,7 +23,7 @@ void addFlow(Edge& e, ll f) { } ll maxFlow(int s, int t) { - int n = sz(adj); + int n = ssize(adj); hs.assign(2*n, {}); ec.assign(n, 0); cur.assign(n, 0); @@ -38,9 +38,9 @@ ll maxFlow(int s, int t) { int v = hs[hi].back(); hs[hi].pop_back(); while (ec[v] > 0) { - if (cur[v] == sz(adj[v])) { + if (cur[v] == ssize(adj[v])) { H[v] = 2*n; - for (int i = 0; i < sz(adj[v]); i++) { + for (int i = 0; i < ssize(adj[v]); i++) { Edge& e = adj[v][i]; if (e.c - e.f > 0 && H[v] > H[e.to] + 1) { diff --git a/content/graph/reroot.cpp b/content/graph/reroot.cpp index 379c839..5a9c9d1 100644 --- a/content/graph/reroot.cpp +++ b/content/graph/reroot.cpp @@ -26,11 +26,11 @@ struct Reroot { 
pref.push_back(takeChild(v, u, w, dp[u])); } auto suf = pref; - partial_sum(all(pref), pref.begin(), comb); + partial_sum(begin(pref), end(pref), begin(pref), comb); exclusive_scan(suf.rbegin(), suf.rend(), suf.rbegin(), E, comb); - for (int i = 0; i < sz(adj[v]); i++) { + for (int i = 0; i < ssize(adj[v]); i++) { auto [u, w] = adj[v][i]; if (u == from) continue; dp[v] = fin(v, comb(pref[i], suf[i + 1])); @@ -40,7 +40,7 @@ struct Reroot { } auto solve() { - dp.assign(sz(adj), E); + dp.assign(ssize(adj), E); dfs0(0); dfs1(0); return dp; diff --git a/content/graph/scc.cpp b/content/graph/scc.cpp index 32f1099..6887712 100644 --- a/content/graph/scc.cpp +++ b/content/graph/scc.cpp @@ -23,11 +23,11 @@ void visit(int v) { }}} void scc() { - inStack.assign(sz(adj), false); - low.assign(sz(adj), -1); - idx.assign(sz(adj), -1); + inStack.assign(ssize(adj), false); + low.assign(ssize(adj), -1); + idx.assign(ssize(adj), -1); counter = sccCounter = 0; - for (int i = 0; i < sz(adj); i++) { + for (int i = 0; i < ssize(adj); i++) { if (low[i] < 0) visit(i); }} diff --git a/content/graph/stoerWagner.cpp b/content/graph/stoerWagner.cpp index 97e667a..a122488 100644 --- a/content/graph/stoerWagner.cpp +++ b/content/graph/stoerWagner.cpp @@ -7,7 +7,7 @@ vector<vector<Edge>> adj, tmp; vector<bool> erased; void merge(int u, int v) { - tmp[u].insert(tmp[u].end(), all(tmp[v])); + tmp[u].insert(end(tmp[u]), begin(tmp[v]), end(tmp[v])); tmp[v].clear(); erased[v] = true; for (auto& vec : tmp) { @@ -19,33 +19,33 @@ void merge(int u, int v) { ll stoer_wagner() { ll res = INF; tmp = adj; - erased.assign(sz(tmp), false); - for (int i = 1; i < sz(tmp); i++) { + erased.assign(ssize(tmp), false); + for (int i = 1; i < ssize(tmp); i++) { int s = 0; while (erased[s]) s++; priority_queue<pair<ll, int>> pq; pq.push({0, s}); - vector<ll> con(sz(tmp)); + vector<ll> con(ssize(tmp)); ll cur = 0; vector<pair<ll, int>> state; while (!pq.empty()) { int c = pq.top().second; pq.pop(); - if (con[c] < 0) continue; //already seen + if (con[c] < 0) continue; // already seen con[c] = -1; for (auto e : tmp[c]) { - if (con[e.to] >= 0) {//add edge to cut + if (con[e.to] >= 0) { // add edge to cut con[e.to] += e.cap; pq.push({con[e.to], e.to}); cur += e.cap; - } else if (e.to != c) {//remove edge from cut + } else if (e.to != c) { // remove edge from cut cur -= e.cap; }} state.push_back({cur, c}); } int t = state.back().second; state.pop_back(); - if (state.empty()) return 0; //graph is not connected?! + if (state.empty()) return 0; // graph is not connected?! 
merge(state.back().second, t); res = min(res, state.back().first); } diff --git a/content/graph/treeIsomorphism.cpp b/content/graph/treeIsomorphism.cpp index 355fefb..8c2ca21 100644 --- a/content/graph/treeIsomorphism.cpp +++ b/content/graph/treeIsomorphism.cpp @@ -7,9 +7,9 @@ int treeLabel(int v, int from = -1) { if (u == from) continue; children.push_back(treeLabel(u, v)); } - sort(all(children)); + ranges::sort(children); if (known.find(children) == known.end()) { - known[children] = sz(known); + known[children] = ssize(known); } return known[children]; } diff --git a/content/graph/virtualTree.cpp b/content/graph/virtualTree.cpp index 6233b27..81ba001 100644 --- a/content/graph/virtualTree.cpp +++ b/content/graph/virtualTree.cpp @@ -2,14 +2,14 @@ vector<int> in, out; void virtualTree(vector<int> ind) { // indices of used nodes - sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); - for (int i = 1, n = sz(ind); i < n; i++) { + ranges::sort(ind, {}, [&](int x) { return in[x]; }); + for (int i = 1, n = ssize(ind); i < n; i++) { ind.push_back(lca(ind[i - 1], ind[i])); } - sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); - ind.erase(unique(all(ind)), ind.end()); + ranges::sort(ind, {}, [&](int x) { return in[x]; }); + ind.erase(begin(ranges::unique(ind)), end(ind)); - int n = sz(ind); + int n = ssize(ind); vector<vector<int>> tree(n); vector<int> st = {0}; for (int i = 1; i < n; i++) { diff --git a/content/latexHeaders/code.sty b/content/latexHeaders/code.sty index 3ebdda3..8a600c5 100644 --- a/content/latexHeaders/code.sty +++ b/content/latexHeaders/code.sty @@ -1,3 +1,6 @@ +\usepackage{ocgx2} +\usepackage{fontawesome} + % Colors, used for syntax highlighting. % To print this document, set all colors to black! \usepackage{xcolor} @@ -101,6 +104,32 @@ % \addtocounter{lstnumber}{-1}% %} +\ifthenelse{\isundefined{\srclink}}{}{ + \lst@AddToHook{Init}{% + \ifthenelse{\equal{\lst@name}{}}{}{% + \begin{minipage}[t][0pt]{\linewidth}% + \vspace{0pt}% + \hfill% + \begin{ocg}[printocg=never]{Source links}{srclinks}{1}% + \hfill\href{\srclink{\lst@name}}{\faExternalLink}% + \end{ocg}% + \end{minipage}% + }% + } +} + +\lst@AddToHook{DeInit}{% + \ifthenelse{\equal{\lst@name}{}}{}{% + \begin{minipage}[b][0pt]{\linewidth}% + \vspace{0pt}% + \hfill% + \begin{ocg}[printocg=never]{Source file names}{srcfiles}{0}% + \hfill\textcolor{gray}{\lst@name}% + \end{ocg}% + \end{minipage}% + }% +} + \newenvironment{btHighlight}[1][] {\begingroup\tikzset{bt@Highlight@par/.style={#1}}\begin{lrbox}{\@tempboxa}} {\end{lrbox}\bt@HL@box[bt@Highlight@par]{\@tempboxa}\endgroup} diff --git a/content/latexHeaders/commands.sty b/content/latexHeaders/commands.sty index edbba1b..73a7dca 100644 --- a/content/latexHeaders/commands.sty +++ b/content/latexHeaders/commands.sty @@ -7,6 +7,11 @@ \newcommand{\code}[1]{\lstinline[breaklines=true]{#1}} \let\codeSafe\lstinline +\ifoptional + \renewcommand{\columnbreak}{} + \newcommand\opthint{\textcolor{gray}{(optional)}} +\fi + \usepackage{tikz} \usetikzlibrary{angles,quotes} @@ -17,7 +22,7 @@ \ifthenelse{\equal{#1}{optional}}{% \optional{ \needspace{4\baselineskip}% - \subsection{#2\textcolor{gray}{(optional)}}% + \subsection{#2 \opthint}% #3% } }{% diff --git a/content/latexmk.opt b/content/latexmk.opt new file mode 100644 index 0000000..88d3463 --- /dev/null +++ b/content/latexmk.opt @@ -0,0 +1,2 @@ +$jobname = 'tcr-opt'; +$pre_tex_code .= '\def\OPTIONAL{}' diff --git a/content/latexmkrc b/content/latexmkrc new file mode 100644 index 0000000..b43f9a2 --- /dev/null +++ 
b/content/latexmkrc @@ -0,0 +1,13 @@ +@default_files = qw(tcr); +$pdf_mode = 1; +$aux_dir = "."; +$out_dir = ".."; +{ + my $commit = `git rev-parse HEAD`; + chomp $commit; + $pre_tex_code .= + '\newcommand{\srclink}[1]' + .'{https://git.gloria-mundi.eu/tcr/plain/content/#1?id='.$commit.'}'; +} +&alt_tex_cmds; +$jobname = 'tcr'; diff --git a/content/math/berlekampMassey.cpp b/content/math/berlekampMassey.cpp index 29e084f..85a1031 100644 --- a/content/math/berlekampMassey.cpp +++ b/content/math/berlekampMassey.cpp @@ -1,6 +1,6 @@ constexpr ll mod = 1'000'000'007; vector<ll> BerlekampMassey(const vector<ll>& s) { - int n = sz(s), L = 0, m = 0; + int n = ssize(s), L = 0, m = 0; vector<ll> C(n), B(n), T; C[0] = B[0] = 1; diff --git a/content/math/bigint.cpp b/content/math/bigint.cpp index 1b3b953..a40f515 100644 --- a/content/math/bigint.cpp +++ b/content/math/bigint.cpp @@ -7,9 +7,9 @@ struct bigint { bigint() : sign(1) {} - bigint(ll v) {*this = v;} + bigint(ll v) { *this = v; } - bigint(const string &s) {read(s);} + bigint(const string &s) { read(s); } void operator=(ll v) { sign = 1; @@ -22,10 +22,11 @@ struct bigint { bigint operator+(const bigint& v) const { if (sign == v.sign) { bigint res = v; - for (ll i = 0, carry = 0; i < max(sz(a), sz(v.a)) || carry; ++i) { - if (i == sz(res.a)) + for (ll i = 0, carry = 0; + i < max(ssize(a), ssize(v.a)) || carry; ++i) { + if (i == ssize(res.a)) res.a.push_back(0); - res.a[i] += carry + (i < sz(a) ? a[i] : 0); + res.a[i] += carry + (i < ssize(a) ? a[i] : 0); carry = res.a[i] >= base; if (carry) res.a[i] -= base; @@ -39,8 +40,8 @@ struct bigint { if (sign == v.sign) { if (abs() >= v.abs()) { bigint res = *this; - for (ll i = 0, carry = 0; i < sz(v.a) || carry; ++i) { - res.a[i] -= carry + (i < sz(v.a) ? v.a[i] : 0); + for (ll i = 0, carry = 0; i < ssize(v.a) || carry; ++i) { + res.a[i] -= carry + (i < ssize(v.a) ? v.a[i] : 0); carry = res.a[i] < 0; if (carry) res.a[i] += base; } @@ -54,8 +55,8 @@ struct bigint { void operator*=(ll v) { if (v < 0) sign = -sign, v = -v; - for (ll i = 0, carry = 0; i < sz(a) || carry; ++i) { - if (i == sz(a)) a.push_back(0); + for (ll i = 0, carry = 0; i < ssize(a) || carry; ++i) { + if (i == ssize(a)) a.push_back(0); ll cur = a[i] * v + carry; carry = cur / base; a[i] = cur % base; @@ -74,12 +75,12 @@ struct bigint { bigint a = a1.abs() * norm; bigint b = b1.abs() * norm; bigint q, r; - q.a.resize(sz(a.a)); - for (ll i = sz(a.a) - 1; i >= 0; i--) { + q.a.resize(ssize(a.a)); + for (ll i = ssize(a.a) - 1; i >= 0; i--) { r *= base; r += a.a[i]; - ll s1 = sz(r.a) <= sz(b.a) ? 0 : r.a[sz(b.a)]; - ll s2 = sz(r.a) <= sz(b.a) - 1 ? 0 : r.a[sz(b.a) - 1]; + ll s1 = ssize(r.a) <= ssize(b.a) ? 0 : r.a[ssize(b.a)]; + ll s2 = ssize(r.a) <= ssize(b.a) - 1 ? 
0 : r.a[ssize(b.a) - 1]; ll d = (base * s1 + s2) / b.a.back(); r -= b * d; while (r < 0) r += b, --d; @@ -102,7 +103,7 @@ struct bigint { void operator/=(ll v) { if (v < 0) sign = -sign, v = -v; - for (ll i = sz(a) - 1, rem = 0; i >= 0; --i) { + for (ll i = ssize(a) - 1, rem = 0; i >= 0; --i) { ll cur = a[i] + rem * base; a[i] = cur / v; rem = cur % v; @@ -119,7 +120,7 @@ struct bigint { ll operator%(ll v) const { if (v < 0) v = -v; ll m = 0; - for (ll i = sz(a) - 1; i >= 0; --i) + for (ll i = ssize(a) - 1; i >= 0; --i) m = (a[i] + m * base) % v; return m * sign; } @@ -139,9 +140,9 @@ struct bigint { bool operator<(const bigint& v) const { if (sign != v.sign) return sign < v.sign; - if (sz(a) != sz(v.a)) - return sz(a) * sign < sz(v.a) * v.sign; - for (ll i = sz(a) - 1; i >= 0; i--) + if (ssize(a) != ssize(v.a)) + return ssize(a) * sign < ssize(v.a) * v.sign; + for (ll i = ssize(a) - 1; i >= 0; i--) if (a[i] != v.a[i]) return a[i] * sign < v.a[i] * sign; return false; @@ -169,7 +170,7 @@ struct bigint { } bool isZero() const { - return a.empty() || (sz(a) == 1 && a[0] == 0); + return a.empty() || (ssize(a) == 1 && a[0] == 0); } bigint operator-() const { @@ -186,7 +187,7 @@ struct bigint { ll longValue() const { ll res = 0; - for (ll i = sz(a) - 1; i >= 0; i--) + for (ll i = ssize(a) - 1; i >= 0; i--) res = res * base + a[i]; return res * sign; } @@ -195,11 +196,11 @@ struct bigint { sign = 1; a.clear(); ll pos = 0; - while (pos < sz(s) && (s[pos] == '-' || s[pos] == '+')) { + while (pos < ssize(s) && (s[pos] == '-' || s[pos] == '+')) { if (s[pos] == '-') sign = -sign; ++pos; } - for (ll i = sz(s) - 1; i >= pos; i -= base_digits) { + for (ll i = ssize(s) - 1; i >= pos; i -= base_digits) { ll x = 0; for (ll j = max(pos, i - base_digits + 1); j <= i; j++) x = x * 10 + s[j] - '0'; @@ -218,13 +219,13 @@ struct bigint { friend ostream& operator<<(ostream& stream, const bigint& v) { if (v.sign == -1) stream << '-'; stream << (v.a.empty() ? 
0 : v.a.back()); - for (ll i = sz(v.a) - 2; i >= 0; --i) + for (ll i = ssize(v.a) - 2; i >= 0; --i) stream << setw(base_digits) << setfill('0') << v.a[i]; return stream; } static vll karatsubaMultiply(const vll& a, const vll& b) { - ll n = sz(a); + ll n = ssize(a); vll res(n + n); if (n <= 32) { for (ll i = 0; i < n; i++) @@ -242,25 +243,25 @@ struct bigint { for (ll i = 0; i < k; i++) a2[i] += a1[i]; for (ll i = 0; i < k; i++) b2[i] += b1[i]; vll r = karatsubaMultiply(a2, b2); - for (ll i = 0; i < sz(a1b1); i++) r[i] -= a1b1[i]; - for (ll i = 0; i < sz(a2b2); i++) r[i] -= a2b2[i]; - for (ll i = 0; i < sz(r); i++) res[i + k] += r[i]; - for (ll i = 0; i < sz(a1b1); i++) res[i] += a1b1[i]; - for (ll i = 0; i < sz(a2b2); i++) res[i + n] += a2b2[i]; + for (ll i = 0; i < ssize(a1b1); i++) r[i] -= a1b1[i]; + for (ll i = 0; i < ssize(a2b2); i++) r[i] -= a2b2[i]; + for (ll i = 0; i < ssize(r); i++) res[i + k] += r[i]; + for (ll i = 0; i < ssize(a1b1); i++) res[i] += a1b1[i]; + for (ll i = 0; i < ssize(a2b2); i++) res[i + n] += a2b2[i]; return res; } bigint operator*(const bigint& v) const { vll ta(a.begin(), a.end()); vll va(v.a.begin(), v.a.end()); - while (sz(ta) < sz(va)) ta.push_back(0); - while (sz(va) < sz(ta)) va.push_back(0); - while (sz(ta) & (sz(ta) - 1)) + while (ssize(ta) < ssize(va)) ta.push_back(0); + while (ssize(va) < ssize(ta)) va.push_back(0); + while (ssize(ta) & (ssize(ta) - 1)) ta.push_back(0), va.push_back(0); vll ra = karatsubaMultiply(ta, va); bigint res; res.sign = sign * v.sign; - for (ll i = 0, carry = 0; i < sz(ra); i++) { + for (ll i = 0, carry = 0; i < ssize(ra); i++) { ll cur = ra[i] + carry; res.a.push_back(cur % base); carry = cur / base; diff --git a/content/math/binomial0.cpp b/content/math/binomial0.cpp index 5f2ccaa..f37aea5 100644 --- a/content/math/binomial0.cpp +++ b/content/math/binomial0.cpp @@ -10,5 +10,5 @@ void precalc() { ll calc_binom(ll n, ll k) { if (n < 0 || n < k || k < 0) return 0; - return (inv[k] * inv[n-k] % mod) * fac[n] % mod; + return (fac[n] * inv[n-k] % mod) * inv[k] % mod; } diff --git a/content/math/binomial1.cpp b/content/math/binomial1.cpp index dab20b3..d0fce18 100644 --- a/content/math/binomial1.cpp +++ b/content/math/binomial1.cpp @@ -1,7 +1,7 @@ ll calc_binom(ll n, ll k) { if (k > n) return 0; ll r = 1; - for (ll d = 1; d <= k; d++) {// Reihenfolge => Teilbarkeit + for (ll d = 1; d <= k; d++) { // Reihenfolge => Teilbarkeit r *= n--, r /= d; } return r; diff --git a/content/math/discreteLogarithm.cpp b/content/math/discreteLogarithm.cpp index 68866e0..844bd27 100644 --- a/content/math/discreteLogarithm.cpp +++ b/content/math/discreteLogarithm.cpp @@ -5,11 +5,11 @@ ll dlog(ll a, ll b, ll m) { //a > 0! 
vals[i] = {e, i}; } vals.emplace_back(m, 0); - sort(all(vals)); + ranges::sort(vals); ll fact = powMod(a, m - bound - 1, m); for (ll i = 0; i < m; i += bound, b = (b * fact) % m) { - auto it = lower_bound(all(vals), pair<ll, ll>{b, 0}); + auto it = ranges::lower_bound(vals, pair<ll, ll>{b, 0}); if (it->first == b) { return (i + it->second) % m; }} diff --git a/content/math/divisors.cpp b/content/math/divisors.cpp index 5afd4fb..2a17f54 100644 --- a/content/math/divisors.cpp +++ b/content/math/divisors.cpp @@ -2,7 +2,7 @@ ll countDivisors(ll n) { ll res = 1; for (ll i = 2; i * i * i <= n; i++) { ll c = 0; - while (n % i == 0) {n /= i; c++;} + while (n % i == 0) { n /= i; c++; } res *= c + 1; } if (isPrime(n)) res *= 2; diff --git a/content/math/gauss.cpp b/content/math/gauss.cpp index d431e52..719f573 100644 --- a/content/math/gauss.cpp +++ b/content/math/gauss.cpp @@ -7,7 +7,7 @@ void takeAll(int n, int line) { for (int i = 0; i < n; i++) { if (i == line) continue; double diff = mat[i][line]; - for (int j = 0; j < sz(mat[i]); j++) { + for (int j = 0; j < ssize(mat[i]); j++) { mat[i][j] -= diff * mat[line][j]; }}} @@ -22,7 +22,7 @@ int gauss(int n) { if (abs(mat[i][i]) > EPS) { normalLine(i); takeAll(n, i); - done[i] = true; + done[i] = true; }} for (int i = 0; i < n; i++) { // gauss fertig, prüfe Lösung bool allZero = true; diff --git a/content/math/gcd-lcm.cpp b/content/math/gcd-lcm.cpp index a1c63c8..1ee7ef5 100644 --- a/content/math/gcd-lcm.cpp +++ b/content/math/gcd-lcm.cpp @@ -1,2 +1,2 @@ -ll gcd(ll a, ll b) {return b == 0 ? a : gcd(b, a % b);} -ll lcm(ll a, ll b) {return a * (b / gcd(a, b));} +ll gcd(ll a, ll b) { return b == 0 ? a : gcd(b, a % b); } +ll lcm(ll a, ll b) { return a * (b / gcd(a, b)); } diff --git a/content/math/inversions.cpp b/content/math/inversions.cpp index 9e47f9b..289161f 100644 --- a/content/math/inversions.cpp +++ b/content/math/inversions.cpp @@ -1,7 +1,7 @@ ll inversions(const vector<ll>& v) { Tree<pair<ll, ll>> t; //ordered statistics tree @\sourceref{datastructures/pbds.cpp}@ ll res = 0; - for (ll i = 0; i < sz(v); i++) { + for (ll i = 0; i < ssize(v); i++) { res += i - t.order_of_key({v[i], i}); t.insert({v[i], i}); } diff --git a/content/math/inversionsMerge.cpp b/content/math/inversionsMerge.cpp index 8235b11..50fe37b 100644 --- a/content/math/inversionsMerge.cpp +++ b/content/math/inversionsMerge.cpp @@ -2,26 +2,26 @@ ll merge(vector<ll>& v, vector<ll>& left, vector<ll>& right) { int a = 0, b = 0, i = 0; ll inv = 0; - while (a < sz(left) && b < sz(right)) { + while (a < ssize(left) && b < ssize(right)) { if (left[a] < right[b]) v[i++] = left[a++]; else { - inv += sz(left) - a; + inv += ssize(left) - a; v[i++] = right[b++]; } } - while (a < sz(left)) v[i++] = left[a++]; - while (b < sz(right)) v[i++] = right[b++]; + while (a < ssize(left)) v[i++] = left[a++]; + while (b < ssize(right)) v[i++] = right[b++]; return inv; } ll mergeSort(vector<ll> &v) { // Sortiert v und gibt Inversionszahl zurück. 
- int n = sz(v); + int n = ssize(v); vector<ll> left(n / 2), right((n + 1) / 2); for (int i = 0; i < n / 2; i++) left[i] = v[i]; for (int i = n / 2; i < n; i++) right[i - n / 2] = v[i]; ll result = 0; - if (sz(left) > 1) result += mergeSort(left); - if (sz(right) > 1) result += mergeSort(right); + if (ssize(left) > 1) result += mergeSort(left); + if (ssize(right) > 1) result += mergeSort(right); return result + merge(v, left, right); } diff --git a/content/math/lgsFp.cpp b/content/math/lgsFp.cpp index bf18c86..64e4c09 100644 --- a/content/math/lgsFp.cpp +++ b/content/math/lgsFp.cpp @@ -7,7 +7,7 @@ void takeAll(int n, int line, ll p) { for (int i = 0; i < n; i++) { if (i == line) continue; ll diff = mat[i][line]; - for (int j = 0; j < sz(mat[i]); j++) { + for (int j = 0; j < ssize(mat[i]); j++) { mat[i][j] -= (diff * mat[line][j]) % p; mat[i][j] = (mat[i][j] + p) % p; }}} diff --git a/content/math/linearRecurrence.cpp b/content/math/linearRecurrence.cpp index a8adacd..eb04566 100644 --- a/content/math/linearRecurrence.cpp +++ b/content/math/linearRecurrence.cpp @@ -1,9 +1,9 @@ constexpr ll mod = 998244353; // oder ntt mul @\sourceref{math/transforms/ntt.cpp}@ vector<ll> mul(const vector<ll>& a, const vector<ll>& b) { - vector<ll> c(sz(a) + sz(b) - 1); - for (int i = 0; i < sz(a); i++) { - for (int j = 0; j < sz(b); j++) { + vector<ll> c(ssize(a) + ssize(b) - 1); + for (int i = 0; i < ssize(a); i++) { + for (int j = 0; j < ssize(b); j++) { c[i+j] += a[i]*b[j] % mod; }} for (ll& x : c) x %= mod; @@ -11,7 +11,7 @@ vector<ll> mul(const vector<ll>& a, const vector<ll>& b) { } ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) { - int n = sz(c); + int n = ssize(c); vector<ll> q(n + 1, 1); for (int i = 0; i < n; i++) q[i + 1] = (mod - c[i]) % mod; vector<ll> p = mul(f, q); diff --git a/content/math/linearRecurrenceOld.cpp b/content/math/linearRecurrenceOld.cpp index 2501e64..f67398d 100644 --- a/content/math/linearRecurrenceOld.cpp +++ b/content/math/linearRecurrenceOld.cpp @@ -1,7 +1,7 @@ constexpr ll mod = 1'000'000'007; vector<ll> modMul(const vector<ll>& a, const vector<ll>& b, const vector<ll>& c) { - ll n = sz(c); + ll n = ssize(c); vector<ll> res(n * 2 + 1); for (int i = 0; i <= n; i++) { //a*b for (int j = 0; j <= n; j++) { @@ -18,8 +18,8 @@ vector<ll> modMul(const vector<ll>& a, const vector<ll>& b, } ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) { - assert(sz(f) == sz(c)); - vector<ll> tmp(sz(c) + 1), a(sz(c) + 1); + assert(ssize(f) == ssize(c)); + vector<ll> tmp(ssize(c) + 1), a(ssize(c) + 1); tmp[0] = a[1] = 1; //tmp = (x^k) % c for (k++; k > 0; k /= 2) { @@ -28,6 +28,6 @@ ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) { } ll res = 0; - for (int i = 0; i < sz(c); i++) res += (tmp[i+1] * f[i]) % mod; + for (int i = 0; i < ssize(c); i++) res += (tmp[i+1] * f[i]) % mod; return res % mod; } diff --git a/content/math/linearSieve.cpp b/content/math/linearSieve.cpp index 64440dd..2ea1e94 100644 --- a/content/math/linearSieve.cpp +++ b/content/math/linearSieve.cpp @@ -3,12 +3,12 @@ ll small[N], power[N], sieved[N]; vector<ll> primes; //wird aufgerufen mit (p^k, p, k) für prime p und k > 0 -ll mu(ll pk, ll p, ll k) {return -(k == 1);} -ll phi(ll pk, ll p, ll k) {return pk - pk / p;} -ll div(ll pk, ll p, ll k) {return k+1;} -ll divSum(ll pk, ll p, ll k) {return (pk*p-1) / (p - 1);} -ll square(ll pk, ll p, ll k) {return k % 2 ? 
pk / p : pk;} -ll squareFree(ll pk, ll p, ll k) {return p;} +ll mu(ll pk, ll p, ll k) { return -(k == 1); } +ll phi(ll pk, ll p, ll k) { return pk - pk / p; } +ll div(ll pk, ll p, ll k) { return k+1; } +ll divSum(ll pk, ll p, ll k) { return (pk*p-1) / (p - 1); } +ll square(ll pk, ll p, ll k) { return k % 2 ? pk / p : pk; } +ll squareFree(ll pk, ll p, ll k) { return p; } void sieve() { // O(N) small[1] = power[1] = sieved[1] = 1; diff --git a/content/math/longestIncreasingSubsequence.cpp b/content/math/longestIncreasingSubsequence.cpp index fcb63b4..e4863d0 100644 --- a/content/math/longestIncreasingSubsequence.cpp +++ b/content/math/longestIncreasingSubsequence.cpp @@ -1,8 +1,8 @@ vector<int> lis(vector<ll>& a) { - int n = sz(a), len = 0; + int n = ssize(a), len = 0; vector<ll> dp(n, INF), dp_id(n), prev(n); for (int i = 0; i < n; i++) { - int pos = lower_bound(all(dp), a[i]) - dp.begin(); + int pos = ranges::lower_bound(dp, a[i]) - begin(dp); dp[pos] = a[i]; dp_id[pos] = i; prev[i] = pos ? dp_id[pos - 1] : -1; diff --git a/content/math/math.tex b/content/math/math.tex index 4ac6c9e..fdf7081 100644 --- a/content/math/math.tex +++ b/content/math/math.tex @@ -26,7 +26,7 @@ \end{methods}
\sourcecode{math/permIndex.cpp}
\end{algorithm}
-\clearpage
+\columnbreak
\subsection{Mod-Exponent und Multiplikation über $\boldsymbol{\mathbb{F}_p}$}
%\vspace{-1.25em}
@@ -100,8 +100,8 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: wenn $a\equiv~b \bmod \ggT(m, n)$.
In diesem Fall sind keine Faktoren
auf der linken Seite erlaubt.
- \end{itemize}
- \sourcecode{math/chineseRemainder.cpp}
+ \end{itemize}
+ \sourcecode{math/chineseRemainder.cpp}
\end{algorithm}
\begin{algorithm}{Primzahltest \& Faktorisierung}
@@ -121,7 +121,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \begin{algorithm}{Matrix-Exponent}
\begin{methods}
\method{precalc}{berechnet $m^{2^b}$ vor}{\log(b)\*n^3}
- \method{calc}{berechnet $m^b\cdot$}{\log(b)\cdot n^2}
+ \method{calc}{berechnet $m^b \cdot v$}{\log(b)\cdot n^2}
\end{methods}
\textbf{Tipp:} wenn \code{v[x]=1} und \code{0} sonst, dann ist \code{res[y]} = $m^b_{y,x}$.
\sourcecode{math/matrixPower.cpp}
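Beispielskizze zum Tipp (eigenständig, nicht der \code{mat}-Typ aus dem Repo): \textsc{Fibonacci} per Matrixpotenz; mit $v[x]=1$ für $x=1$ ist $F(n) = (m^n)_{0,1}$.
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
constexpr ll MOD = 1'000'000'007;
using M2 = array<array<ll, 2>, 2>;
M2 mul(const M2& a, const M2& b) {
  M2 c{};
  for (int i = 0; i < 2; i++) for (int k = 0; k < 2; k++)
    for (int j = 0; j < 2; j++) c[i][j] = (c[i][j] + a[i][k] * b[k][j]) % MOD;
  return c;
}
ll fib(ll n) { // entspricht calc(n, v) mit v = {0, 1}; fib(10) == 55
  M2 m = {{{1, 1}, {1, 0}}}, res = {{{1, 0}, {0, 1}}};
  for (; n > 0; n >>= 1, m = mul(m, m)) if (n & 1) res = mul(res, m);
  return res[0][1];
}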
@@ -236,7 +236,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \sourcecode{math/legendre.cpp}
\end{algorithm}
-\begin{algorithm}{Lineares Sieb und Multiplikative Funktionen}
+\begin{algorithm}{Lineares Sieb und multiplikative Funktionen}
Eine (zahlentheoretische) Funktion $f$ heißt multiplikativ, wenn $f(1)=1$ und $f(a\cdot b)=f(a)\cdot f(b)$, falls $\ggT(a,b)=1$.
$\Rightarrow$ Es ist ausreichend $f(p^k)$ für alle primen $p$ und alle $k$ zu kennen.
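Beispielskizze (hypothetische Hilfsfunktion, nutzt dieselbe Signatur $f(p^k, p, k)$ wie im Sieb unten): Auswertung einer multiplikativen Funktion über die Primfaktorzerlegung.
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
ll phi_pk(ll pk, ll p, ll k) { return pk - pk / p; } // Beispiel: Euler-Phi
ll evalMultiplicative(ll n, ll (*f)(ll, ll, ll)) { // O(sqrt(n))
  ll res = 1;
  for (ll p = 2; p * p <= n; p++) {
    if (n % p) continue;
    ll pk = 1, k = 0;
    while (n % p == 0) n /= p, pk *= p, k++;
    res *= f(pk, p, k); // Primpotenzen sind paarweise teilerfremd
  }
  if (n > 1) res *= f(n, n, 1); // übrig gebliebener Primfaktor
  return res; // evalMultiplicative(12, phi_pk) == 4
}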
@@ -250,7 +250,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \textbf{Wichtig:} Sieb rechts ist schneller für \code{isPrime} oder \code{primes}!
\sourcecode{math/linearSieve.cpp}
- \textbf{\textsc{Möbius}-Funktion:}
+ \textbf{\textsc{Möbius} Funktion:}
\begin{itemize}
\item $\mu(n)=+1$, falls $n$ quadratfrei ist und gerade viele Primteiler hat
\item $\mu(n)=-1$, falls $n$ quadratfrei ist und ungerade viele Primteiler hat
@@ -263,7 +263,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \item $p$ prim, $k \in \mathbb{N}$:
$~\varphi(p^k) = p^k - p^{k - 1}$
- \item \textbf{Euler's Theorem:}
+ \item \textbf{\textsc{Euler}'s Theorem:}
Für $b \geq \varphi(c)$ gilt: $a^b \equiv a^{b \bmod \varphi(c) + \varphi(c)} \pmod{c}$. Darüber hinaus gilt: $\gcd(a, c) = 1 \Leftrightarrow a^b \equiv a^{b \bmod \varphi(c)} \pmod{c}$.
Falls $c$ prim ist, liefert das den \textbf{kleinen Satz von \textsc{Fermat}}:
$a^{c} \equiv a \pmod{c}$
@@ -321,6 +321,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \end{algorithm}
\begin{algorithm}{Polynome, FFT, NTT \& andere Transformationen}
+ \label{fft}
Multipliziert Polynome $A$ und $B$.
\begin{itemize}
\item $\deg(A \cdot B) = \deg(A) + \deg(B)$
@@ -328,14 +329,15 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: $\deg(A \cdot B) + 1$ haben.
Größe muss eine Zweierpotenz sein.
\item Für ganzzahlige Koeffizienten: \code{(ll)round(real(a[i]))}
- \item \emph{xor}, \emph{or} und \emph{and} Transform funktioniert auch mit \code{double} oder modulo einer Primzahl $p$ falls $p \geq 2^{\texttt{bits}}$
+ \item \emph{or} Transform berechnet sum over subsets
+ $\rightarrow$ inverse für inclusion/exclusion
\end{itemize}
%\sourcecode{math/fft.cpp}
%\sourcecode{math/ntt.cpp}
\sourcecode{math/transforms/fft.cpp}
\sourcecode{math/transforms/ntt.cpp}
\sourcecode{math/transforms/bitwiseTransforms.cpp}
- Multiplikation mit 2 transforms statt 3: (nur benutzten wenn nötig!)
+ Multiplikation mit 2 Transforms statt 3: (nur benutzen wenn nötig!)
\sourcecode{math/transforms/fftMul.cpp}
\end{algorithm}
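Benutzungsskizze (Annahme: \code{mul} aus multiplyNTT.cpp bzw. multiplyFFT.cpp ist eingebunden):
vector<ll> a = {1, 2}, b = {3, 4, 5};
vector<ll> c = mul(a, b);
// Koeffizienten von (1 + 2x)(3 + 4x + 5x^2): c[0..3] = {3, 10, 13, 10}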
@@ -345,7 +347,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \subsection{Kombinatorik}
-\paragraph{Wilsons Theorem}
+\paragraph{\textsc{Wilson}'s Theorem}
A number $n$ is prime if and only if
$(n-1)!\equiv -1\bmod{n}$.\\
($n$ is prime if and only if $(m-1)!\cdot(n-m)!\equiv(-1)^m\bmod{n}$ for all $m$ in $\{1,\dots,n\}$)
@@ -357,14 +359,14 @@ $(n-1)!\equiv -1\bmod{n}$.\\ \end{cases}
\end{align*}
-\paragraph{\textsc{Zeckendorfs} Theorem}
+\paragraph{\textsc{Zeckendorf}'s Theorem}
Jede positive natürliche Zahl kann eindeutig als Summe einer oder mehrerer
verschiedener \textsc{Fibonacci}-Zahlen geschrieben werden, sodass keine zwei
aufeinanderfolgenden \textsc{Fibonacci}-Zahlen in der Summe vorkommen.\\
\emph{Lösung:} Greedy, nimm immer die größte \textsc{Fibonacci}-Zahl, die noch
hineinpasst.
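Greedy-Skizze (hypothetische Hilfsfunktion):
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
vector<ll> zeckendorf(ll n) { // n >= 1
  vector<ll> fib = {1, 2}; // F_2, F_3, ...
  while (fib.back() + fib[fib.size() - 2] <= n)
    fib.push_back(fib.back() + fib[fib.size() - 2]);
  vector<ll> res;
  for (int i = (int)fib.size() - 1; i >= 0 && n > 0; i--)
    if (fib[i] <= n) res.push_back(fib[i]), n -= fib[i];
  return res; // z.B. zeckendorf(100) == {89, 8, 3}
}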
-\paragraph{\textsc{Lucas}-Theorem}
+\paragraph{\textsc{Lucas}'s Theorem}
Ist $p$ prim, $m=\sum_{i=0}^km_ip^i$, $n=\sum_{i=0}^kn_ip^i$ ($p$-adische Darstellung),
so gilt
\vspace{-0.75\baselineskip}
@@ -542,10 +544,10 @@ Wenn man $k$ Spiele in den Zuständen $X_1, \ldots, X_k$ hat, dann ist die \text \input{math/tables/series}
\subsection{Wichtige Zahlen}
-\input{math/tables/composite}
+\input{math/tables/prime-composite}
-\subsection{Recover $\boldsymbol{x}$ and $\boldsymbol{y}$ from $\boldsymbol{y}$ from $\boldsymbol{x\*y^{-1}}$ }
-\method{recover}{findet $x$ und $y$ für $x=x\*y^{-1}\bmod m$}{\log(m)}
+\subsection{Recover $\boldsymbol{x}$ and $\boldsymbol{y}$ from $\boldsymbol{x\*y^{-1}}$ }
+\method{recover}{findet $x$ und $y$ für $c=x\*y^{-1}\bmod m$}{\log(m)}
\textbf{WICHTIG:} $x$ und $y$ müssen kleiner als $\sqrt{\nicefrac{m}{2}}$ sein!
\sourcecode{math/recover.cpp}
diff --git a/content/math/matrixPower.cpp b/content/math/matrixPower.cpp index d981e6e..d80dac6 100644 --- a/content/math/matrixPower.cpp +++ b/content/math/matrixPower.cpp @@ -1,14 +1,14 @@ vector<mat> pows; void precalc(mat m) { - pows = {mat(sz(m.m), 1), m}; - for (int i = 1; i < 60; i++) pows.push_back(pows[i] * pows[i]); + pows = {m}; + for (int i = 0; i < 60; i++) pows.push_back(pows[i] * pows[i]); } auto calc(ll b, vector<ll> v) { - for (ll i = 1; b > 0; i++) { + for (ll i = 0; b > 0; i++) { if (b & 1) v = pows[i] * v; - b /= 2; + b >>= 1; } return v; } diff --git a/content/math/permIndex.cpp b/content/math/permIndex.cpp index 4cffc12..563b33a 100644 --- a/content/math/permIndex.cpp +++ b/content/math/permIndex.cpp @@ -1,12 +1,12 @@ ll permIndex(vector<ll> v) { Tree<ll> t; - reverse(all(v)); + ranges::reverse(v); for (ll& x : v) { t.insert(x); x = t.order_of_key(x); } ll res = 0; - for (int i = sz(v); i > 0; i--) { + for (int i = ssize(v); i > 0; i--) { res = res * i + v[i - 1]; } return res; diff --git a/content/math/piLegendre.cpp b/content/math/piLegendre.cpp index 21b974b..6401a4f 100644 --- a/content/math/piLegendre.cpp +++ b/content/math/piLegendre.cpp @@ -1,23 +1,23 @@ -constexpr ll cache = 500; // requires O(cache^3)
-vector<vector<ll>> memo(cache * cache, vector<ll>(cache));
-
-ll pi(ll n);
-
-ll phi(ll n, ll k) {
- if (n <= 1 || k < 0) return 0;
- if (n <= primes[k]) return n - 1;
- if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k;
- bool ok = n < cache * cache;
- if (ok && memo[n][k] > 0) return memo[n][k];
- ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1);
- if (ok) memo[n][k] = res;
- return res;
-}
-
-ll pi(ll n) {
- if (n < N) { // implement this as O(1) lookup for speedup!
- return distance(primes.begin(), upper_bound(all(primes), n));
- } else {
- ll k = pi(sqrtl(n) + 1);
- return n - phi(n, k) + k;
-}}
+constexpr ll cache = 500; // requires O(cache^3) +vector<vector<ll>> memo(cache * cache, vector<ll>(cache)); + +ll pi(ll n); + +ll phi(ll n, ll k) { + if (n <= 1 || k < 0) return 0; + if (n <= primes[k]) return n - 1; + if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k; + bool ok = n < cache * cache; + if (ok && memo[n][k] > 0) return memo[n][k]; + ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1); + if (ok) memo[n][k] = res; + return res; +} + +ll pi(ll n) { + if (n < N) { // implement this as O(1) lookup for speedup! + return ranges::upper_bound(primes, n) - begin(primes); + } else { + ll k = pi(sqrtl(n) + 1); + return n - phi(n, k) + k; +}} diff --git a/content/math/polynomial.cpp b/content/math/polynomial.cpp index 84f3aaa..12a4fd7 100644 --- a/content/math/polynomial.cpp +++ b/content/math/polynomial.cpp @@ -4,15 +4,15 @@ struct poly { poly(int deg = 0) : data(1 + deg) {} poly(initializer_list<ll> _data) : data(_data) {} - int size() const {return sz(data);} + int size() const { return ssize(data); } void trim() { for (ll& x : data) x = (x % mod + mod) % mod; while (size() > 1 && data.back() == 0) data.pop_back(); } - ll& operator[](int x) {return data[x];} - const ll& operator[](int x) const {return data[x];} + ll& operator[](int x) { return data[x]; } + const ll& operator[](int x) const { return data[x]; } ll operator()(int x) const { ll res = 0; diff --git a/content/math/primeSieve.cpp b/content/math/primeSieve.cpp index 1b0f514..2b2bf26 100644 --- a/content/math/primeSieve.cpp +++ b/content/math/primeSieve.cpp @@ -8,7 +8,7 @@ bool isPrime(ll x) { } void primeSieve() { - for (ll i = 3; i < N; i += 2) {// i * i < N reicht für isPrime + for (ll i = 3; i < N; i += 2) { // i * i < N reicht für isPrime if (!isNotPrime[i / 2]) { primes.push_back(i); // optional for (ll j = i * i; j < N; j+= 2 * i) { diff --git a/content/math/recover.cpp b/content/math/recover.cpp index 1a593f0..a4c22aa 100644 --- a/content/math/recover.cpp +++ b/content/math/recover.cpp @@ -1,4 +1,4 @@ -ll sq(ll x) {return x*x;} +ll sq(ll x) { return x*x; } array<ll, 2> recover(ll c, ll m) { array<ll, 2> u = {m, 0}, v = {c, 1}; diff --git a/content/math/rho.cpp b/content/math/rho.cpp index ad640cd..c7f7a70 100644 --- a/content/math/rho.cpp +++ b/content/math/rho.cpp @@ -2,7 +2,7 @@ using lll = __int128; ll rho(ll n) { // Findet Faktor < n, nicht unbedingt prim. if (n % 2 == 0) return 2; ll x = 0, y = 0, prd = 2, i = n/2 + 7; - auto f = [&](lll c){return (c * c + i) % n;}; + auto f = [&](lll c) { return (c * c + i) % n; }; for (ll t = 30; t % 40 || gcd(prd, n) == 1; t++) { if (x == y) x = ++i, y = f(x); if (ll q = (lll)prd * abs(x-y) % n; q) prd = q; @@ -13,7 +13,7 @@ ll rho(ll n) { // Findet Faktor < n, nicht unbedingt prim. void factor(ll n, map<ll, int>& facts) { if (n == 1) return; - if (isPrime(n)) {facts[n]++; return;} + if (isPrime(n)) { facts[n]++; return; } ll f = rho(n); factor(n / f, facts); factor(f, facts); } diff --git a/content/math/shortModInv.cpp b/content/math/shortModInv.cpp index cf91ca0..7d3002c 100644 --- a/content/math/shortModInv.cpp +++ b/content/math/shortModInv.cpp @@ -1,3 +1,3 @@ ll multInv(ll x, ll m) { // x^{-1} mod m - return 1 < x ? m - multInv(m % x, x) * m / x : 1; + return 1 < (x %= m) ? 
m - multInv(m, x) * m / x : 1; } diff --git a/content/math/simpson.cpp b/content/math/simpson.cpp index 7f237a4..da9c002 100644 --- a/content/math/simpson.cpp +++ b/content/math/simpson.cpp @@ -1,4 +1,4 @@ -//double f(double x) {return x;} +//double f(double x) { return x; } double simps(double a, double b) { return (f(a) + 4.0 * f((a + b) / 2.0) + f(b)) * (b - a) / 6.0; diff --git a/content/math/sqrtModCipolla.cpp b/content/math/sqrtModCipolla.cpp index 1fac0c5..c062646 100644 --- a/content/math/sqrtModCipolla.cpp +++ b/content/math/sqrtModCipolla.cpp @@ -1,4 +1,4 @@ -ll sqrtMod(ll a, ll p) {// teste mit legendre ob lösung existiert +ll sqrtMod(ll a, ll p) {// teste mit Legendre ob Lösung existiert if (a < 2) return a; ll t = 0; while (legendre((t*t-4*a) % p, p) >= 0) t = rng() % p; diff --git a/content/math/tables/composite.tex b/content/math/tables/composite.tex deleted file mode 100644 index 7a6ab09..0000000 --- a/content/math/tables/composite.tex +++ /dev/null @@ -1,26 +0,0 @@ -\begin{expandtable} -\begin{tabularx}{\linewidth}{|r||r|R||r||r|} - \hline - $10^x$ & Highly Composite & \# Divs & \# prime Divs & \# Primes \\ - \hline - 1 & 6 & 4 & 2 & 4 \\ - 2 & 60 & 12 & 3 & 25 \\ - 3 & 840 & 32 & 4 & 168 \\ - 4 & 7\,560 & 64 & 5 & 1\,229 \\ - 5 & 83\,160 & 128 & 6 & 9\,592 \\ - 6 & 720\,720 & 240 & 7 & 78\,498 \\ - 7 & 8\,648\,640 & 448 & 8 & 664\,579 \\ - 8 & 73\,513\,440 & 768 & 8 & 5\,761\,455 \\ - 9 & 735\,134\,400 & 1\,344 & 9 & 50\,847\,534 \\ - 10 & 6\,983\,776\,800 & 2\,304 & 10 & 455\,052\,511 \\ - 11 & 97\,772\,875\,200 & 4\,032 & 10 & 4\,118\,054\,813 \\ - 12 & 963\,761\,198\,400 & 6\,720 & 11 & 37\,607\,912\,018 \\ - 13 & 9\,316\,358\,251\,200 & 10\,752 & 12 & 346\,065\,536\,839 \\ - 14 & 97\,821\,761\,637\,600 & 17\,280 & 12 & 3\,204\,941\,750\,802 \\ - 15 & 866\,421\,317\,361\,600 & 26\,880 & 13 & 29\,844\,570\,422\,669 \\ - 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & 13 & 279\,238\,341\,033\,925 \\ - 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & 14 & 2\,623\,557\,157\,654\,233 \\ - 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & 16 & 24\,739\,954\,287\,740\,860 \\ - \hline -\end{tabularx} -\end{expandtable} diff --git a/content/math/tables/prime-composite.tex b/content/math/tables/prime-composite.tex new file mode 100644 index 0000000..b8adadf --- /dev/null +++ b/content/math/tables/prime-composite.tex @@ -0,0 +1,31 @@ +\begin{expandtable} +\begin{tabularx}{\linewidth}{|r|rIr|rIr|r|R|} + \hline + \multirow{2}{*}{$10^x$} + & \multirow{2}{*}{Highly Composite} + & \multirow{2}{*}{\# Divs} + & \multicolumn{2}{c|}{Prime} + & \multirow{2}{*}{\# Primes} & \# Prime \\ + & & & $<$ & $>$ & & Factors \\ + \hline + 1 & 6 & 4 & $-3$ & $+1$ & 4 & 2 \\ + 2 & 60 & 12 & $-3$ & $+1$ & 25 & 3 \\ + 3 & 840 & 32 & $-3$ & $+9$ & 168 & 4 \\ + 4 & 7\,560 & 64 & $-27$ & $+7$ & 1\,229 & 5 \\ + 5 & 83\,160 & 128 & $-9$ & $+3$ & 9\,592 & 6 \\ + 6 & 720\,720 & 240 & $-17$ & $+3$ & 78\,498 & 7 \\ + 7 & 8\,648\,640 & 448 & $-9$ & $+19$ & 664\,579 & 8 \\ + 8 & 73\,513\,440 & 768 & $-11$ & $+7$ & 5\,761\,455 & 8 \\ + 9 & 735\,134\,400 & 1\,344 & $-63$ & $+7$ & 50\,847\,534 & 9 \\ + 10 & 6\,983\,776\,800 & 2\,304 & $-33$ & $+19$ & 455\,052\,511 & 10 \\ + 11 & 97\,772\,875\,200 & 4\,032 & $-23$ & $+3$ & 4\,118\,054\,813 & 10 \\ + 12 & 963\,761\,198\,400 & 6\,720 & $-11$ & $+39$ & 37\,607\,912\,018 & 11 \\ + 13 & 9\,316\,358\,251\,200 & 10\,752 & $-29$ & $+37$ & 346\,065\,536\,839 & 12 \\ + 14 & 97\,821\,761\,637\,600 & 17\,280 & $-27$ & $+31$ & 3\,204\,941\,750\,802 & 12 \\ + 15 & 866\,421\,317\,361\,600 & 
26\,880 & $-11$ & $+37$ & 29\,844\,570\,422\,669 & 13 \\ + 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & $-63$ & $+61$ & 279\,238\,341\,033\,925 & 13 \\ + 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & $-3$ & $+3$ & 2\,623\,557\,157\,654\,233 & 14 \\ + 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & $-11$ & $+3$ & 24\,739\,954\,287\,740\,860 & 15 \\ + \hline +\end{tabularx} +\end{expandtable} diff --git a/content/math/transforms/andTransform.cpp b/content/math/transforms/andTransform.cpp index 1fd9f5c..9e40c74 100644 --- a/content/math/transforms/andTransform.cpp +++ b/content/math/transforms/andTransform.cpp @@ -1,8 +1,8 @@ void fft(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int s = 1; s < n; s *= 2) { for (int i = 0; i < n; i += 2 * s) { for (int j = i; j < i + s; j++) { ll& u = a[j], &v = a[j + s]; - tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v); + tie(u, v) = inv ? pair(u - v, v) : pair(u + v, v); }}}} diff --git a/content/math/transforms/bitwiseTransforms.cpp b/content/math/transforms/bitwiseTransforms.cpp index 28561da..17f3163 100644 --- a/content/math/transforms/bitwiseTransforms.cpp +++ b/content/math/transforms/bitwiseTransforms.cpp @@ -1,11 +1,11 @@ void bitwiseConv(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int s = 1; s < n; s *= 2) { for (int i = 0; i < n; i += 2 * s) { for (int j = i; j < i + s; j++) { ll& u = a[j], &v = a[j + s]; - tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v); // AND - //tie(u, v) = inv ? pair(v, u - v) : pair(u + v, u); //OR + tie(u, v) = inv ? pair(u - v, v) : pair(u + v, v); // AND + //tie(u, v) = inv ? pair(u, v - u) : pair(u, v + u); //OR //tie(u, v) = pair(u + v, u - v); // XOR }}} //if (inv) for (ll& x : a) x /= n; // XOR (careful with MOD) diff --git a/content/math/transforms/fft.cpp b/content/math/transforms/fft.cpp index 2bd95b2..1f80e36 100644 --- a/content/math/transforms/fft.cpp +++ b/content/math/transforms/fft.cpp @@ -1,7 +1,7 @@ using cplx = complex<double>; void fft(vector<cplx>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int i = 0, j = 1; j < n - 1; ++j) { for (int k = n >> 1; k > (i ^= k); k >>= 1); if (j < i) swap(a[i], a[j]); diff --git a/content/math/transforms/fftMul.cpp b/content/math/transforms/fftMul.cpp index 660ed79..da6a538 100644 --- a/content/math/transforms/fftMul.cpp +++ b/content/math/transforms/fftMul.cpp @@ -1,8 +1,8 @@ vector<cplx> mul(vector<ll>& a, vector<ll>& b) { - int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); - vector<cplx> c(all(a)), d(n); + int n = 1 << (__lg(ssize(a) + ssize(b) - 1) + 1); + vector<cplx> c(begin(a), end(a)), d(n); c.resize(n); - for (int i = 0; i < sz(b); i++) c[i] = {real(c[i]), b[i]}; + for (int i = 0; i < ssize(b); i++) c[i] = {real(c[i]), b[i]}; fft(c); for (int i = 0; i < n; i++) { int j = (n - i) & (n - 1); diff --git a/content/math/transforms/multiplyBitwise.cpp b/content/math/transforms/multiplyBitwise.cpp index f7cf169..5275b8c 100644 --- a/content/math/transforms/multiplyBitwise.cpp +++ b/content/math/transforms/multiplyBitwise.cpp @@ -1,5 +1,5 @@ vector<ll> mul(vector<ll> a, vector<ll> b) { - int n = 1 << (__lg(2 * max(sz(a), sz(b)) - 1)); + int n = 1 << (__lg(2 * max(ssize(a), ssize(b)) - 1)); a.resize(n), b.resize(n); bitwiseConv(a), bitwiseConv(b); for (int i=0; i<n; i++) a[i] *= b[i]; // MOD? 
diff --git a/content/math/transforms/multiplyFFT.cpp b/content/math/transforms/multiplyFFT.cpp index 0022d1f..963be94 100644 --- a/content/math/transforms/multiplyFFT.cpp +++ b/content/math/transforms/multiplyFFT.cpp @@ -1,6 +1,6 @@ vector<ll> mul(vector<ll>& a, vector<ll>& b) { - int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); - vector<cplx> a2(all(a)), b2(all(b)); + int n = 1 << (__lg(ssize(a) + ssize(b) - 1) + 1); + vector<cplx> a2(begin(a), end(a)), b2(begin(b), end(b)); a2.resize(n), b2.resize(n); fft(a2), fft(b2); for (int i=0; i<n; i++) a2[i] *= b2[i]; diff --git a/content/math/transforms/multiplyNTT.cpp b/content/math/transforms/multiplyNTT.cpp index 806d124..d234ce3 100644 --- a/content/math/transforms/multiplyNTT.cpp +++ b/content/math/transforms/multiplyNTT.cpp @@ -1,5 +1,5 @@ vector<ll> mul(vector<ll> a, vector<ll> b) { - int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); + int n = 1 << bit_width(size(a) + size(b) - 1); a.resize(n), b.resize(n); ntt(a), ntt(b); for (int i=0; i<n; i++) a[i] = a[i] * b[i] % mod; diff --git a/content/math/transforms/ntt.cpp b/content/math/transforms/ntt.cpp index ca605d3..fc7874e 100644 --- a/content/math/transforms/ntt.cpp +++ b/content/math/transforms/ntt.cpp @@ -1,7 +1,7 @@ constexpr ll mod = 998244353, root = 3; void ntt(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); auto b = a; ll r = inv ? powMod(root, mod - 2, mod) : root; diff --git a/content/math/transforms/orTransform.cpp b/content/math/transforms/orTransform.cpp index eb1da44..6503a68 100644 --- a/content/math/transforms/orTransform.cpp +++ b/content/math/transforms/orTransform.cpp @@ -1,8 +1,8 @@ void fft(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int s = 1; s < n; s *= 2) { for (int i = 0; i < n; i += 2 * s) { for (int j = i; j < i + s; j++) { ll& u = a[j], &v = a[j + s]; - tie(u, v) = inv ? pair(v, u - v) : pair(u + v, u); + tie(u, v) = inv ? pair(u, v - u) : pair(u, v + u); }}}} diff --git a/content/math/transforms/seriesOperations.cpp b/content/math/transforms/seriesOperations.cpp index b405698..3d8aa11 100644 --- a/content/math/transforms/seriesOperations.cpp +++ b/content/math/transforms/seriesOperations.cpp @@ -17,7 +17,7 @@ vector<ll> poly_inv(const vector<ll>& a, int n) { } vector<ll> poly_deriv(vector<ll> a) { - for (int i = 1; i < sz(a); i++) + for (int i = 1; i < ssize(a); i++) a[i-1] = a[i] * i % mod; a.pop_back(); return a; @@ -25,11 +25,11 @@ vector<ll> poly_deriv(vector<ll> a) { vector<ll> poly_integr(vector<ll> a) { static vector<ll> inv = {0, 1}; - for (static int i = 2; i <= sz(a); i++) + for (static int i = 2; i <= ssize(a); i++) inv.push_back(mod - mod / i * inv[mod % i] % mod); a.push_back(0); - for (int i = sz(a) - 1; i > 0; i--) + for (int i = ssize(a) - 1; i > 0; i--) a[i] = a[i-1] * inv[i] % mod; a[0] = 0; return a; @@ -46,7 +46,7 @@ vector<ll> poly_exp(vector<ll> a, int n) { for (int len = 1; len < n; len *= 2) { vector<ll> p = poly_log(q, 2*len); for (int i = 0; i < 2*len; i++) - p[i] = (mod - p[i] + (i < sz(a) ? a[i] : 0)) % mod; + p[i] = (mod - p[i] + (i < ssize(a) ? 
a[i] : 0)) % mod; vector<ll> q2 = q; q2.resize(2*len); ntt(p), ntt(q2); diff --git a/content/math/transforms/xorTransform.cpp b/content/math/transforms/xorTransform.cpp index f9d1d82..075aac3 100644 --- a/content/math/transforms/xorTransform.cpp +++ b/content/math/transforms/xorTransform.cpp @@ -1,5 +1,5 @@ void fft(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int s = 1; s < n; s *= 2) { for (int i = 0; i < n; i += 2 * s) { for (int j = i; j < i + s; j++) { diff --git a/content/other/fastIO.cpp b/content/other/fastIO.cpp index 9badcc7..09473f4 100644 --- a/content/other/fastIO.cpp +++ b/content/other/fastIO.cpp @@ -16,7 +16,7 @@ void printPositive(int n) { } void fastprint(int n) { - if(n == 0) {putchar('0'); return;} + if(n == 0) { putchar('0'); return; } if (n < 0) { putchar('-'); printPositive(-n); diff --git a/content/other/fastSubsetSum.cpp b/content/other/fastSubsetSum.cpp index 84396f6..38a84b6 100644 --- a/content/other/fastSubsetSum.cpp +++ b/content/other/fastSubsetSum.cpp @@ -1,11 +1,11 @@ int fastSubsetSum(vector<int> w, int t){ int a = 0, b = 0; - while(b < sz(w) && a + w[b] <= t) a += w[b++]; - if(b == sz(w)) return a; - int m = *max_element(all(w)); + while(b < ssize(w) && a + w[b] <= t) a += w[b++]; + if(b == ssize(w)) return a; + int m = *ranges::max_element(w); vector<int> dp(2*m, -1), old; dp[m+a-t] = b; - for(int i = b; i < sz(w); i++){ + for(int i = b; i < ssize(w); i++){ old = dp; for(int j = 0; j < m; j++){ dp[j+w[i]] = max(dp[j+w[i]], old[j]); @@ -18,4 +18,4 @@ int fastSubsetSum(vector<int> w, int t){ } for(a = t; dp[m+a-t] < 0; a--); return a; -}
\ No newline at end of file +} diff --git a/content/other/josephus2.cpp b/content/other/josephus2.cpp index 33544ea..1c4295d 100644 --- a/content/other/josephus2.cpp +++ b/content/other/josephus2.cpp @@ -1,5 +1,5 @@ -int rotateLeft(int n) { // Der letzte Überlebende, 1-basiert. +ll rotateLeft(ll n) { // Der letzte Überlebende, 0-basiert. int bits = __lg(n); - n ^= 1 << bits; - return 2 * n + 1; + n ^= 1ll << bits; + return n << 1; } diff --git a/content/other/other.tex b/content/other/other.tex index 191a6da..8896962 100644 --- a/content/other/other.tex +++ b/content/other/other.tex @@ -18,9 +18,9 @@ \begin{expandtable}
\begin{tabularx}{\linewidth}{|lR|}
\hline
- Addition & \code{__builtin_saddll_overflow(a, b, &c)} \\
- Subtraktion & \code{__builtin_ssubll_overflow(a, b, &c)} \\
- Multiplikation & \code{__builtin_smulll_overflow(a, b, &c)} \\
+ Addition & \code{__builtin_saddll_overflow(a, b, \&c)} \\
+ Subtraktion & \code{__builtin_ssubll_overflow(a, b, \&c)} \\
+ Multiplikation & \code{__builtin_smulll_overflow(a, b, \&c)} \\
\hline
\end{tabularx}
\end{expandtable}
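Benutzungsskizze (GCC/Clang, \code{ll} = \code{long long}):
ll a = 5'000'000'000'000'000'000, b = a, c;
bool ovf = __builtin_saddll_overflow(a, b, &c);
// ovf == true, c enthält das auf 64 Bit reduzierte Ergebnis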
@@ -30,9 +30,9 @@ \begin{expandtable}
\begin{tabularx}{\linewidth}{|Ll|}
\hline
- Bit an Position j lesen & \code{(x & (1 << j)) != 0} \\
- Bit an Position j setzten & \code{x |= (1 << j)} \\
- Bit an Position j löschen & \code{x &= ~(1 << j)} \\
+ Bit an Position j lesen & \code{(x \& (1 << j)) != 0} \\
+ Bit an Position j setzen & \code{x |= (1 << j)} \\
+ Bit an Position j löschen & \code{x \&= ~(1 << j)} \\
Bit an Position j flippen & \code{x ^= (1 << j)} \\
Anzahl an führenden Nullen ($x \neq 0$) & \code{__builtin_clzll(x)} \\
Anzahl an schließenden Nullen ($x \neq 0$) & \code{__builtin_ctzll(x)} \\
@@ -67,9 +67,7 @@ \paragraph{Quadrangle inequality} Die Bedingung $\forall a\leq b\leq c\leq d:
C[a][d] + C[b][c] \geq C[a][c] + C[b][d]$ ist hinreichend für beide Optimierungen.
- \paragraph{Sum over Subsets DP} $\text{res}[\text{mask}]=\sum_{i\subseteq\text{mask}}\text{in}[i]$.
- Für Summe über Supersets \code{res} einmal vorher und einmal nachher reversen.
- \sourcecode{other/sos.cpp}
+ \paragraph{Sum over Subsets DP} Siehe \emph{or} Transform, Seite \pageref{fft}.
\end{algorithm}
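Zur Referenz die Vorwärtsrichtung der or-Transformation (entspricht dem entfernten other/sos.cpp; \code{in} ist das Eingabe-Array der Länge $2^{\texttt{bits}}$):
vector<ll> res(in);
for (int i = 1; i < ssize(res); i *= 2)
  for (int mask = 0; mask < ssize(res); mask++)
    if (mask & i) res[mask] += res[mask ^ i];
// res[mask] = Summe von in[i] über alle i, die Teilmenge von mask sind;
// für die Summe über Supersets res vorher und nachher umdrehen.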
\begin{algorithm}{Fast Subset Sum}
@@ -82,12 +80,12 @@ \sourcecode{other/pbs.cpp}
\end{algorithm}
+\columnbreak
\begin{algorithm}{Josephus-Problem}
$n$ Personen im Kreis, jeder $k$-te wird erschossen.
\begin{description}
\item[Spezialfall $\boldsymbol{k=2}$:] Betrachte $n$ binär.
- Für $n = 1b_1b_2b_3..b_n$ ist $b_1b_2b_3..b_n1$ die Position des letzten Überlebenden.
- (Rotiere $n$ um eine Stelle nach links)
+ Für $n = 1b_1b_2b_3..b_n$ ist $b_1b_2b_3..b_n0$ die Position des letzten Überlebenden.
\end{description}
\sourcecode{other/josephus2.cpp}
@@ -98,7 +96,6 @@ Also: $F(n,k) = (F(n-1,k)+k)\%n$. Basisfall: $F(1,k) = 0$.
\end{description}
\sourcecode{other/josephusK.cpp}
- \textbf{Beachte bei der Ausgabe, dass die Personen im ersten Fall von $\boldsymbol{1, \ldots, n}$ nummeriert sind, im zweiten Fall von $\boldsymbol{0, \ldots, n-1}$!}
\end{algorithm}
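Skizze der obigen Rekurrenz für allgemeines $k$ (0-basiert, Laufzeit O(n)):
ll josephus(ll n, ll k) { // Position des Überlebenden, 0-basiert
  ll res = 0;                                      // F(1, k) = 0
  for (ll i = 2; i <= n; i++) res = (res + k) % i; // F(i, k) = (F(i-1, k) + k) % i
  return res;
}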
\begin{algorithm}[optional]{Zeileneingabe}
@@ -127,7 +124,7 @@ c'(u,v)&=c(u,v)-d(u,v)&c'(t,s)&=x
\end{align*}
Löse den Fluss auf $G'$ mit \textsc{Dinic}'s Algorithmus; wenn alle Kanten von $s'$ saturiert sind, ist der Fluss in $G$ gültig. $x$ beschränkt den Fluss in $G$ (Binary-Search für minflow, $\infty$ sonst).
- \item \textbf{\textsc{Johnsons} Reweighting Algorithmus:}
+ \item \textbf{\textsc{Johnson}s Reweighting Algorithm:}
Initialisiere alle Entfernungen mit \texttt{d[i] = 0}. Berechne mit \textsc{Bellman-Ford} kürzeste Entfernungen.
Falls es einen negativen Zyklus gibt, abbrechen.
Sonst ändere die Gewichte von allen Kanten \texttt{(u,v)} im ursprünglichen Graphen zu \texttt{d[u]+w[u,v]-d[v]}.
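Skizze des Umgewichtungsschritts (Annahmen: Kantenliste \code{edges} mit Einträgen (u, v, w), \code{d} aus \textsc{Bellman-Ford}):
for (auto& [u, v, w] : edges) w += d[u] - d[v]; // alle neuen Gewichte >= 0
// danach Dijkstra von jedem Startknoten s;
// echte Distanz: dist(s, t) = distNeu(s, t) - d[s] + d[t]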
@@ -186,8 +183,8 @@ [X/G] = \frac{1}{\vert G \vert} \sum_{g \in G} m^{\#(g)}
\]
- \item \textbf{Verteilung von Primzahlen:}
- Für alle $n \in \mathbb{N}$ gilt: Ex existiert eine Primzahl $p$ mit $n \leq p \leq 2n$.
+ \item \textbf{\textsc{Bertrand}sches Postulat:}
+ Für alle $n \in \mathbb{N}$ gilt: Es existiert eine Primzahl $p$ mit $n < p \leq 2n$.
\item \textbf{Satz von \textsc{Kirchhoff}:}
Sei $G$ ein zusammenhängender, ungerichteter Graph evtl. mit Mehrfachkanten.
@@ -199,7 +196,7 @@ \newline
Entferne letzte Zeile und Spalte und berechne Betrag der Determinante.
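Skizze (hypothetische Funktion; Mehrfachkanten einfach mehrfach in \code{adj} eintragen, double-Gauß genügt für kleine $n$):
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
ll spanningTrees(const vector<vector<int>>& adj) {
  int n = ssize(adj);
  vector<vector<double>> L(n, vector<double>(n));
  for (int v = 0; v < n; v++) for (int u : adj[v]) L[v][v]++, L[v][u]--;
  double det = 1; // Determinante der oberen (n-1)x(n-1)-Minore
  for (int i = 0; i + 1 < n; i++) {
    int p = i;
    for (int j = i; j + 1 < n; j++) if (abs(L[j][i]) > abs(L[p][i])) p = j;
    if (abs(L[p][i]) < 1e-9) return 0;
    if (p != i) swap(L[i], L[p]), det = -det;
    det *= L[i][i];
    for (int j = i + 1; j + 1 < n; j++)
      for (int k = n - 1; k >= i; k--) L[j][k] -= L[j][i] / L[i][i] * L[i][k];
  }
  return llround(fabs(det)); // Anzahl der Spannbäume
}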
- \item \textbf{\textsc{Dilworths}-Theorem:}
+ \item \textbf{\textsc{Dilworth}'s Theorem:}
Sei $S$ eine Menge und $\leq$ eine partielle Ordnung ($S$ ist ein Poset).
Eine \emph{Kette} ist eine Teilmenge $\{x_1,\ldots,x_n\}$ mit $x_1 \leq \ldots \leq x_n$.
Eine \emph{Partition} ist eine Menge von Ketten, sodass jedes $s \in S$ in genau einer Kette ist.
@@ -211,15 +208,15 @@ Berechnung: Maximales Matching in bipartitem Graphen.
Dupliziere jedes $s \in S$ in $u_s$ und $v_s$.
Falls $x \leq y$, füge Kante $u_x \to v_y$ hinzu.
- Wenn Matching zu langsam ist, versuche Struktur des Posets auszunutzen und evtl. anders eine maximale Anitkette zu finden.
+ Wenn Matching zu langsam ist, versuche Struktur des Posets auszunutzen und evtl. anders eine maximale Antikette zu finden.
- \item \textbf{\textsc{Turan}'s-Theorem:}
+ \item \textbf{\textsc{Tur\'an}'s Theorem:}
Die maximale Anzahl an Kanten in einem Graphen mit $n$ Knoten, der keine Clique der Größe $x+1$ enthält, ist:
\begin{align*}
ext(n, K_{x+1}) &= \binom{n}{2} - \left[\left(x - (n \bmod x)\right) \cdot \binom{\floor{\frac{n}{x}}}{2} + \left(n\bmod x\right) \cdot \binom{\ceil{\frac{n}{x}}}{2}\right]
\end{align*}
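Beispiel: $n = 5$, $x = 2$ (dreiecksfrei): $\binom{5}{2} - \left[1 \cdot \binom{2}{2} + 1 \cdot \binom{3}{2}\right] = 10 - 4 = 6$ Kanten, erreicht durch $K_{2,3}$.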
- \item \textbf{\textsc{Euler}'s-Polyedersatz:}
+ \item \textbf{\textsc{Euler}scher Polyedersatz:}
In planaren Graphen gilt $n-m+f-c=1$ ($n$ Knoten, $m$ Kanten, $f$ Flächen inkl. der äußeren, $c$ Zusammenhangskomponenten).
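Beispiel: ein einzelnes Dreieck hat $n=3$, $m=3$, $f=2$ (inkl. äußerer Fläche), $c=1$, also $3-3+2-1=1$.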
\item \textbf{\textsc{Pythagoreische Tripel}:}
diff --git a/content/other/pbs.cpp b/content/other/pbs.cpp
index f4db2fd..e6bfeac 100644
--- a/content/other/pbs.cpp
+++ b/content/other/pbs.cpp
@@ -7,7 +7,7 @@
 while (true) {
     focus.emplace_back((low[i] + high[i]) / 2, i);
   }}
   if (focus.empty()) break;
-  sort(all(focus));
+  ranges::sort(focus);
   // reset simulation
   for (int step = 0; auto [mid, i] : focus) {
diff --git a/content/other/sos.cpp b/content/other/sos.cpp
deleted file mode 100644
index 01bc44c..0000000
--- a/content/other/sos.cpp
+++ /dev/null
@@ -1,6 +0,0 @@
-vector<ll> res(in);
-for (int i = 1; i < sz(res); i *= 2) {
-  for (int mask = 0; mask < sz(res); mask++){
-    if (mask & i) {
-      res[mask] += res[mask ^ i];
-}}}
diff --git a/content/other/timed.cpp b/content/other/timed.cpp
index b3ed4ef..a3ede29 100644
--- a/content/other/timed.cpp
+++ b/content/other/timed.cpp
@@ -1,3 +1,3 @@
 int times = clock();
 //run for 900ms
-while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) {...}
+while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) { ... }
diff --git a/content/string/ahoCorasick.cpp b/content/string/ahoCorasick.cpp
index 390d16d..d738961 100644
--- a/content/string/ahoCorasick.cpp
+++ b/content/string/ahoCorasick.cpp
@@ -4,7 +4,8 @@ struct AhoCorasick {
     int suffix = 0, ch, cnt = 0;
     array<int, ALPHABET_SIZE> nxt = {};
-    vert(int p, int c) : suffix(-p), ch(c) {fill(all(nxt), -1);}
+    vert(int p, int c):
+      suffix(-p), ch(c) { ranges::fill(nxt, -1); }
   };
   vector<vert> aho = {{0, -1}};
@@ -13,7 +14,7 @@ struct AhoCorasick {
     for (auto c : s) {
       int idx = c - OFFSET;
       if (aho[v].nxt[idx] == -1) {
-        aho[v].nxt[idx] = sz(aho);
+        aho[v].nxt[idx] = ssize(aho);
         aho.emplace_back(v, idx);
       }
       v = aho[v].nxt[idx];
@@ -37,9 +38,9 @@ struct AhoCorasick {
   vector<vector<int>> adj;
   vector<ll> dp;
   void buildGraph() {
-    adj.resize(sz(aho));
-    dp.assign(sz(aho), 0);
-    for (int i = 1; i < sz(aho); i++) {
+    adj.resize(ssize(aho));
+    dp.assign(ssize(aho), 0);
+    for (int i = 1; i < ssize(aho); i++) {
       adj[getSuffix(i)].push_back(i);
   }}
diff --git a/content/string/deBruijn.cpp b/content/string/deBruijn.cpp
index e829137..545dde7 100644
--- a/content/string/deBruijn.cpp
+++ b/content/string/deBruijn.cpp
@@ -1,7 +1,7 @@
 string deBruijn(int n, char mi = '0', char ma = '1') {
   string res, c(1, mi);
   do {
-    if (n % sz(c) == 0) res += c;
+    if (n % ssize(c) == 0) res += c;
   } while(next(c, n, mi, ma));
   return res;
 }
diff --git a/content/string/duval.cpp b/content/string/duval.cpp
index 253bae1..de94ebd 100644
--- a/content/string/duval.cpp
+++ b/content/string/duval.cpp
@@ -1,8 +1,8 @@
 vector<pair<int, int>> duval(const string& s) {
   vector<pair<int, int>> res;
-  for (int i = 0; i < sz(s);) {
+  for (int i = 0; i < ssize(s);) {
     int j = i + 1, k = i;
-    for (; j < sz(s) && s[k] <= s[j]; j++) {
+    for (; j < ssize(s) && s[k] <= s[j]; j++) {
       if (s[k] < s[j]) k = i;
       else k++;
     }
@@ -15,5 +15,5 @@ vector<pair<int, int>> duval(const string& s) {
 int minrotation(const string& s) {
   auto parts = duval(s+s);
   for (auto [l, r] : parts) {
-    if (r >= sz(s)) return l;
+    if (r >= ssize(s)) return l;
 }}
diff --git a/content/string/kmp.cpp b/content/string/kmp.cpp
index 421479e..a354aa7 100644
--- a/content/string/kmp.cpp
+++ b/content/string/kmp.cpp
@@ -1,7 +1,7 @@
 vector<int> kmpPreprocessing(const string& sub) {
-  vector<int> b(sz(sub) + 1);
+  vector<int> b(ssize(sub) + 1);
   b[0] = -1;
-  for (int i = 0, j = -1; i < sz(sub);) {
+  for (int i = 0, j = -1; i < ssize(sub);) {
     while (j >= 0 && sub[i] != sub[j]) j = b[j];
     b[++i] = ++j;
   }
@@ -9,10 +9,10 @@ vector<int> kmpPreprocessing(const string& sub) {
 }

 vector<int> kmpSearch(const string& s, const string& sub) {
   vector<int> result, pre = kmpPreprocessing(sub);
-  for (int i = 0, j = 0; i < sz(s);) {
+  for (int i = 0, j = 0; i < ssize(s);) {
     while (j >= 0 && s[i] != sub[j]) j = pre[j];
     i++; j++;
-    if (j == sz(sub)) {
+    if (j == ssize(sub)) {
       result.push_back(i - j);
       j = pre[j];
 }}
diff --git a/content/string/longestCommonSubsequence.cpp b/content/string/longestCommonSubsequence.cpp
index 6c9ea44..14ca62c 100644
--- a/content/string/longestCommonSubsequence.cpp
+++ b/content/string/longestCommonSubsequence.cpp
@@ -1,12 +1,12 @@
 string lcss(const string& a, const string& b) {
-  vector<vector<int>> m(sz(a) + 1, vector<int>(sz(b) + 1));
-  for (int i = sz(a) - 1; i >= 0; i--) {
-    for (int j = sz(b) - 1; j >= 0; j--) {
+  vector<vector<int>> m(ssize(a) + 1, vector<int>(ssize(b) + 1));
+  for (int i = ssize(a) - 1; i >= 0; i--) {
+    for (int j = ssize(b) - 1; j >= 0; j--) {
       if (a[i] == b[j]) m[i][j] = 1 + m[i+1][j+1];
       else m[i][j] = max(m[i+1][j], m[i][j+1]);
   }} // Für die Länge: return m[0][0];
   string res;
-  for (int j = 0, i = 0; j < sz(b) && i < sz(a);) {
+  for (int j = 0, i = 0; j < ssize(b) && i < ssize(a);) {
     if (a[i] == b[j]) res += a[i++], j++;
     else if (m[i][j+1] > m[i+1][j]) j++;
     else i++;
diff --git a/content/string/lyndon.cpp b/content/string/lyndon.cpp
index e44379b..cb477d4 100644
--- a/content/string/lyndon.cpp
+++ b/content/string/lyndon.cpp
@@ -1,5 +1,5 @@
 bool next(string& s, int maxLen, char mi = '0', char ma = '1') {
-  for (int i = sz(s), j = sz(s); i < maxLen; i++)
+  for (int i = ssize(s), j = ssize(s); i < maxLen; i++)
     s.push_back(s[i % j]);
   while(!s.empty() && s.back() == ma) s.pop_back();
   if (s.empty()) {
diff --git a/content/string/manacher.cpp b/content/string/manacher.cpp
index 112bd55..9fa2991 100644
--- a/content/string/manacher.cpp
+++ b/content/string/manacher.cpp
@@ -1,9 +1,9 @@
 vector<int> manacher(const string& t) {
   //transforms "aa" to ".a.a." to find even length palindromes
-  string s(sz(t) * 2 + 1, '.');
-  for (int i = 0; i < sz(t); i++) s[2 * i + 1] = t[i];
+  string s(ssize(t) * 2 + 1, '.');
+  for (int i = 0; i < ssize(t); i++) s[2 * i + 1] = t[i];

-  int mid = 0, r = 0, n = sz(s);
+  int mid = 0, r = 0, n = ssize(s);
   vector<int> pal(n);
   for (int i = 1; i < n - 1; i++) {
     if (r > i) pal[i] = min(r - i, pal[2 * mid - i]);
diff --git a/content/string/rollingHash.cpp b/content/string/rollingHash.cpp
index 6e914aa..1157cb7 100644
--- a/content/string/rollingHash.cpp
+++ b/content/string/rollingHash.cpp
@@ -14,5 +14,5 @@ struct Hash {
     return (pref[r] - mul(power[r-l], pref[l]) + M) % M;
   }
-  static ll mul(__int128 a, ll b) {return a * b % M;}
+  static ll mul(__int128 a, ll b) { return a * b % M; }
 };
diff --git a/content/string/rollingHashCf.cpp b/content/string/rollingHashCf.cpp
index 84b2e4e..c08a9d3 100644
--- a/content/string/rollingHashCf.cpp
+++ b/content/string/rollingHashCf.cpp
@@ -13,5 +13,5 @@ struct Hash {
     return (pref[r] - mul(power[r-l], pref[l]) + M) % M;
   }
-  static ll mul(__int128 a, ll b) {return a * b % M;}
+  static ll mul(__int128 a, ll b) { return a * b % M; }
 };
diff --git a/content/string/string.tex b/content/string/string.tex
index bedabfb..0e482bf 100644
--- a/content/string/string.tex
+++ b/content/string/string.tex
@@ -63,21 +63,21 @@
 \end{algorithm}
 \clearpage
-\begin{algorithm}{Lyndon und De-Bruijn}
+\begin{algorithm}{\textsc{Lyndon} und \textsc{De-Bruijn}}
   \begin{itemize}
-    \item \textbf{Lyndon-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen.
-    \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von Lyndon-Worten zerlegt werden.
-    \item Für Lyndon-Worte $u, v$ mit $u<v$ gilt, dass $uv$ auch ein Lyndon-Wort ist.
+    \item \textbf{\textsc{Lyndon}-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen.
+    \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von \textsc{Lyndon}-Worten zerlegt werden.
+    \item Für \textsc{Lyndon}-Worte $u, v$ mit $u<v$ gilt, dass $uv$ auch ein \textsc{Lyndon}-Wort ist.
   \end{itemize}
   \begin{methods}
-    \method[, Durchschnitt $\Theta(1)$]{next}{lexikographisch nächstes Lyndon-Wort}{n}
-    \method{duval}{zerlegt $s$ in Lyndon-Worte}{n}
+    \method[, Durchschnitt $\Theta(1)$]{next}{lexikographisch nächstes \textsc{Lyndon}-Wort}{n}
+    \method{duval}{zerlegt $s$ in \textsc{Lyndon}-Worte}{n}
     \method{minrotation}{berechnet kleinste Rotation von $s$}{n}
   \end{methods}
   \sourcecode{string/lyndon.cpp}
   \sourcecode{string/duval.cpp}
   \begin{itemize}
-    \item \textbf{De-Bruijn-Sequenze $\boldsymbol{B(\Sigma, n)}$:}~~~ein Wort das jedes Wort der Länge $n$ genau einmal als substring enthält (und minimal ist). Wobei $B(\Sigma, n)$ zyklisch betrachtet wird.
+    \item \textbf{\textsc{De-Bruijn}-Sequenz $\boldsymbol{B(\Sigma, n)}$:}~~~ein Wort das jedes Wort der Länge $n$ genau einmal als substring enthält (und minimal ist). Wobei $B(\Sigma, n)$ zyklisch betrachtet wird.
     \item es gibt $\frac{(k!)^{k^{n-1}}}{k^{n}}$ verschiedene $B(\Sigma, n)$
     \item $B(\Sigma, n)$ hat Länge $\abs{\Sigma}^n$
   \end{itemize}
diff --git a/content/string/suffixArray.cpp b/content/string/suffixArray.cpp
index 8b698d2..65bbb38 100644
--- a/content/string/suffixArray.cpp
+++ b/content/string/suffixArray.cpp
@@ -4,22 +4,22 @@ struct SuffixArray {
   vector<int> SA, LCP;
   vector<vector<int>> P;
-  SuffixArray(const string& s) : n(sz(s)), SA(n), LCP(n),
+  SuffixArray(const string& s) : n(ssize(s)), SA(n), LCP(n),
     P(__lg(2 * n - 1) + 1, vector<int>(n)) {
-    P[0].assign(all(s));
-    iota(all(SA), 0);
-    sort(all(SA), [&](int a, int b) {return s[a] < s[b];});
+    P[0].assign(begin(s), end(s));
+    iota(begin(SA), end(SA), 0);
+    ranges::sort(SA, {}, [&](int x) { return s[x]; });
     vector<int> x(n);
     for (int k = 1, c = 1; c < n; k++, c *= 2) {
-      iota(all(x), n - c);
+      iota(begin(x), end(x), n - c);
       for (int ptr = c; int i : SA) if (i >= c) x[ptr++] = i - c;
       vector<int> cnt(k == 1 ? MAX_CHAR : n);
       for (int i : P[k-1]) cnt[i]++;
-      partial_sum(all(cnt), begin(cnt));
+      partial_sum(begin(cnt), end(cnt), begin(cnt));
       for (int i : x | views::reverse) SA[--cnt[P[k-1][i]]] = i;
-      auto p = [&](int i) {return i < n ? P[k-1][i] : -1;};
+      auto p = [&](int i) { return i < n ? P[k-1][i] : -1; };
       for (int i = 1; i < n; i++) {
         int a = SA[i-1], b = SA[i];
         P[k][b] = P[k][a] + (p(a) != p(b) || p(a+c) != p(b+c));
@@ -27,10 +27,11 @@ struct SuffixArray {
     for (int i = 1; i < n; i++) LCP[i] = lcp(SA[i-1], SA[i]);
   }
-  int lcp(int x, int y) {//x & y are text-indices, not SA-indices
+  // x & y are text-indices, not SA-indices
+  int lcp(int x, int y) {
     if (x == y) return n - x;
     int res = 0;
-    for (int i = sz(P) - 1; i >= 0 && max(x, y) + res < n; i--) {
+    for (int i = ssize(P)-1; i >= 0 && max(x, y) + res < n; i--){
      if (P[i][x + res] == P[i][y + res]) res |= 1 << i;
     }
     return res;
diff --git a/content/string/suffixAutomaton.cpp b/content/string/suffixAutomaton.cpp
index 9a68cb3..f9aa80b 100644
--- a/content/string/suffixAutomaton.cpp
+++ b/content/string/suffixAutomaton.cpp
@@ -4,20 +4,20 @@ struct SuffixAutomaton {
   struct State {
     int len, link = -1;
     array<int, ALPHABET_SIZE> nxt; // map if large Alphabet
-    State(int l) : len(l) {fill(all(nxt), -1);}
+    State(int l): len(l) { ranges::fill(nxt, -1); }
   };
   vector<State> st = {State(0)};
   int cur = 0;

   SuffixAutomaton(const string& s) {
-    st.reserve(2 * sz(s));
+    st.reserve(2 * ssize(s));
     for (auto c : s) extend(c - OFFSET);
   }

   void extend(int c) {
     int p = cur;
-    cur = sz(st);
+    cur = ssize(st);
     st.emplace_back(st[p].len + 1);
     for (; p != -1 && st[p].nxt[c] < 0; p = st[p].link) {
       st[p].nxt[c] = cur;
@@ -33,9 +33,9 @@ struct SuffixAutomaton {
       st.back().link = st[q].link;
       st.back().nxt = st[q].nxt;
       for (; p != -1 && st[p].nxt[c] == q; p = st[p].link) {
-        st[p].nxt[c] = sz(st) - 1;
+        st[p].nxt[c] = ssize(st) - 1;
       }
-      st[q].link = st[cur].link = sz(st) - 1;
+      st[q].link = st[cur].link = ssize(st) - 1;
   }}}

   vector<int> calculateTerminals() {
@@ -49,7 +49,7 @@ struct SuffixAutomaton {
   // Pair with start index (in t) and length of LCS.
   pair<int, int> longestCommonSubstring(const string& t) {
     int v = 0, l = 0, best = 0, bestp = -1;
-    for (int i = 0; i < sz(t); i++) {
+    for (int i = 0; i < ssize(t); i++) {
       int c = t[i] - OFFSET;
       while (v > 0 && st[v].nxt[c] < 0) {
         v = st[v].link;
diff --git a/content/string/suffixTree.cpp b/content/string/suffixTree.cpp
index 7112f39..6362c3e 100644
--- a/content/string/suffixTree.cpp
+++ b/content/string/suffixTree.cpp
@@ -11,12 +11,12 @@ struct SuffixTree {
   SuffixTree(const string& s_) : s(s_) {
     needsSuffix = remainder = curVert = curEdge = curLen = 0;
     pos = -1;
-    for (int i = 0; i < sz(s); i++) extend();
+    for (int i = 0; i < ssize(s); i++) extend();
   }

   int newVert(int start, int end) {
     tree.push_back({start, end, 0, {}});
-    return sz(tree) - 1;
+    return ssize(tree) - 1;
   }

   void addSuffixLink(int vert) {
@@ -42,7 +42,7 @@
     while (remainder) {
       if (curLen == 0) curEdge = pos;
       if (!tree[curVert].nxt.count(s[curEdge])) {
-        int leaf = newVert(pos, sz(s));
+        int leaf = newVert(pos, ssize(s));
         tree[curVert].nxt[s[curEdge]] = leaf;
         addSuffixLink(curVert);
       } else {
@@ -56,7 +56,7 @@
         int split = newVert(tree[nxt].start, tree[nxt].start + curLen);
         tree[curVert].nxt[s[curEdge]] = split;
-        int leaf = newVert(pos, sz(s));
+        int leaf = newVert(pos, ssize(s));
         tree[split].nxt[s[pos]] = leaf;
         tree[nxt].start += curLen;
         tree[split].nxt[s[tree[nxt].start]] = nxt;
@@ -69,4 +69,4 @@
     } else {
       curVert = tree[curVert].suf ? tree[curVert].suf : 0;
 }}}
-};
\ No newline at end of file
+};
diff --git a/content/string/trie.cpp b/content/string/trie.cpp
index 03cf947..db39c43 100644
--- a/content/string/trie.cpp
+++ b/content/string/trie.cpp
@@ -3,7 +3,7 @@ constexpr int ALPHABET_SIZE = 2;
 struct node {
   int words, ends;
   array<int, ALPHABET_SIZE> nxt;
-  node() : words(0), ends(0) {fill(all(nxt), -1);}
+  node(): words(0), ends(0) { ranges::fill(nxt, -1); }
 };
 vector<node> trie = {node()};
@@ -13,7 +13,7 @@ int traverse(const vector<int>& word, int x) {
     if (id < 0 || (trie[id].words == 0 && x <= 0)) return -1;
     trie[id].words += x;
     if (trie[id].nxt[c] < 0 && x > 0) {
-      trie[id].nxt[c] = sz(trie);
+      trie[id].nxt[c] = ssize(trie);
       trie.emplace_back();
     }
     id = trie[id].nxt[c];
diff --git a/content/string/z.cpp b/content/string/z.cpp
index 069fa38..0d8cafb 100644
--- a/content/string/z.cpp
+++ b/content/string/z.cpp
@@ -1,5 +1,5 @@
 vector<int> Z(const string& s) {
-  int n = sz(s);
+  int n = ssize(s);
   vector<int> z(n);
   for (int i = 1, x = 0; i < n; i++) {
     z[i] = max(0, min(z[i - x], x + z[x] - i));
diff --git a/content/tcr.tex b/content/tcr.tex
index 6d849d5..46a9a6a 100644
--- a/content/tcr.tex
+++ b/content/tcr.tex
@@ -6,12 +6,14 @@
 ]{scrartcl}

 % General information.
-\newcommand{\teamname}{Kindergarten Timelimit}
+\newcommand{\teamname}{Infinite Loopers}
 \newcommand{\university}{Karlsruhe Institute of Technology}

 % Options
 \newif\ifoptional
-%\optionaltrue
+\ifdefined\OPTIONAL
+  \optionaltrue
+\fi

 % Font encoding.
 \usepackage[T1]{fontenc}
@@ -44,6 +46,7 @@
 % Content.
 \begin{multicols*}{3}
+  \raggedcolumns
   \input{datastructures/datastructures}
   \input{graph/graph}
   \input{geometry/geometry}
@@ -54,12 +57,6 @@
   \input{other/other}
   \input{template/template}
   \clearpage
-  \ifodd\value{page}
-  \else
-    \null
-    \thispagestyle{empty}
-    \clearpage
-  \fi
   \input{tests/test}
 \end{multicols*}
 \end{document}
diff --git a/content/template/template.cpp b/content/template/template.cpp
index 7430d23..7c92f09 100644
--- a/content/template/template.cpp
+++ b/content/template/template.cpp
@@ -1,17 +1,15 @@
 #include <bits/stdc++.h>
 using namespace std;
-#define tsolve int t; cin >> t; while(t--) solve
-#define all(x) ::begin(x), ::end(x)
-#define sz(x) (ll)::size(x)
-
+using ii = pair<int, int>;
+using vi = vector<int>;
 using ll = long long;
 using ld = long double;

-void solve() {}
+void solve() {
+}

 int main() {
-  cin.tie(0)->sync_with_stdio(false);
-  cout << setprecision(16);
+  cin.tie(0)->sync_with_stdio(0);
   solve();
 }
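Zu den \textsc{Lyndon}/\textsc{De-Bruijn}-Eigenschaften aus string.tex oben ein Beispiel: $B(\{0,1\}, 3) = 00010111$ ist die Konkatenation der \textsc{Lyndon}-Wörter $0, 001, 011, 1$ (alle, deren Länge $3$ teilt, in lexikographischer Reihenfolge); die Länge ist $\abs{\Sigma}^n = 2^3 = 8$, und zyklisch gelesen kommt jedes Wort der Länge $3$ genau einmal vor.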
